Posted to commits@phoenix.apache.org by ma...@apache.org on 2015/03/04 23:40:36 UTC

[01/50] [abbrv] phoenix git commit: PHOENIX-1612 Avoid context classloader issue

Repository: phoenix
Updated Branches:
  refs/heads/calcite 679571b19 -> 026f60b12


PHOENIX-1612 Avoid context classloader issue

Don't create a static HColumnDescriptor, to avoid creating an
HBaseConfiguration from a static context. This change makes it
possible to run Phoenix via an external jar file in tools like
DbVisualizer (instead of having to put Phoenix directly on the
application classpath).
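
For readers unfamiliar with the failure mode, a minimal sketch of the pattern being removed (the class name is invented; only HColumnDescriptor and its default-value map are real HBase API):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorHolderSketch {
        // Anti-pattern: this initializer runs at class-load time, so (per the
        // commit message) an HBaseConfiguration is created under whatever
        // context classloader is active when the class happens to be loaded.
        private static final HColumnDescriptor DEFAULT_COL_DESCRIPTOR =
                new HColumnDescriptor(Bytes.toBytes("0")); // "0" = Phoenix default family

        // Fix: skip the descriptor instance entirely and consult the static
        // default-value map on demand, as the patch below does.
        static boolean isHColumnProperty(String propName) {
            return HColumnDescriptor.getDefaultValues().containsKey(propName);
        }
    }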


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d058a41c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d058a41c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d058a41c

Branch: refs/heads/calcite
Commit: d058a41c52c6a68c1581eff520c1f9e12486b89c
Parents: b12ddfa
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Wed Jan 28 20:30:33 2015 +0100
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Thu Jan 29 11:04:58 2015 +0100

----------------------------------------------------------------------
 .../org/apache/phoenix/query/ConnectionQueryServicesImpl.java    | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d058a41c/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d545b2b..97efc43 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -200,8 +200,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     private volatile ConcurrentMap<SequenceKey,Sequence> sequenceMap = Maps.newConcurrentMap();
     private KeyValueBuilder kvBuilder;
     
-    private static final HColumnDescriptor defaultColDescriptor = new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
-    
     private PMetaData newEmptyMetaData() {
         long maxSizeBytes = props.getLong(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
                 QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE);
@@ -1673,7 +1671,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
     
     private boolean isHColumnProperty(String propName) {
-        return defaultColDescriptor.getValue(propName) != null;
+        return HColumnDescriptor.getDefaultValues().containsKey(propName);
     }
 
     private boolean isHTableProperty(String propName) {
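
A brief, hedged sketch of how a predicate like the new isHColumnProperty() can be used; the surrounding method is invented for illustration, but the rule it encodes (a name is a column-family property iff HBase declares a default value for it, e.g. TTL or BLOOMFILTER) matches the patch:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.HColumnDescriptor;

    final class PropertyClassifierSketch {
        // Collect the subset of user-supplied properties that belong on the
        // column family descriptor rather than the table descriptor.
        static Map<String, Object> familyProps(Map<String, Object> props) {
            Map<String, Object> result = new HashMap<String, Object>();
            for (Map.Entry<String, Object> e : props.entrySet()) {
                if (HColumnDescriptor.getDefaultValues().containsKey(e.getKey())) {
                    result.put(e.getKey(), e.getValue());
                }
            }
            return result;
        }
    }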


[09/50] [abbrv] phoenix git commit: PHOENIX-514 Support functional indexes (Thomas D'Silva)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 9c5c2cd..a51723b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -37,13 +37,15 @@ import org.apache.phoenix.compile.SubqueryRewriter;
 import org.apache.phoenix.iterate.ParallelIteratorFactory;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.parse.AliasedNode;
-import org.apache.phoenix.parse.HintNode;
-import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.parse.AndParseNode;
 import org.apache.phoenix.parse.BooleanParseNodeVisitor;
 import org.apache.phoenix.parse.ColumnParseNode;
+import org.apache.phoenix.parse.IndexExpressionParseNodeRewriter;
+import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.ParseNodeRewriter;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.parse.TableNode;
 import org.apache.phoenix.query.QueryServices;
@@ -54,9 +56,8 @@ import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PTableType;
-import org.apache.phoenix.schema.SaltingUtil;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.IndexUtil;
 
 import com.google.common.collect.Lists;
@@ -232,7 +233,10 @@ public class QueryOptimizer {
         // Check index state of now potentially updated index table to make sure it's active
         if (PIndexState.ACTIVE.equals(resolver.getTables().get(0).getTable().getIndexState())) {
             try {
+            	// translate nodes that match expressions that are indexed to the associated column parse node
+                indexSelect = ParseNodeRewriter.rewrite(indexSelect, new  IndexExpressionParseNodeRewriter(index, statement.getConnection()));
                 QueryCompiler compiler = new QueryCompiler(statement, indexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager());
+                
                 QueryPlan plan = compiler.compile();
                 // If query doesn't have where clause and some of columns to project are missing
                 // in the index then we need to get missing columns from main table for each row in
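
To make the rewrite concrete, a hedged end-to-end illustration of what the new IndexExpressionParseNodeRewriter enables (table, index, and JDBC URL are invented; assumes a reachable Phoenix cluster):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class FunctionalIndexExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            Statement stmt = conn.createStatement();
            stmt.execute("CREATE TABLE emp (id INTEGER PRIMARY KEY, name VARCHAR)");
            // PHOENIX-514: the index key may now be an expression, not just a column
            stmt.execute("CREATE INDEX upper_name_idx ON emp (UPPER(name))");
            // At optimization time the UPPER(name) parse node in the WHERE clause
            // is rewritten to the corresponding index column, so the index applies.
            ResultSet rs = stmt.executeQuery(
                    "SELECT id FROM emp WHERE UPPER(name) = 'JOHN'");
            while (rs.next()) {
                System.out.println(rs.getInt(1));
            }
            conn.close();
        }
    }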

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java
index cc65d89..961af20 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java
@@ -49,4 +49,26 @@ public class BetweenParseNode extends CompoundParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + (negate ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		BetweenParseNode other = (BetweenParseNode) obj;
+		if (negate != other.negate)
+			return false;
+		return true;
+	}
 }
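
The equals()/hashCode() overrides added to BetweenParseNode here, and to the other parse node classes below, exist so that structurally identical parse nodes can serve as hash-map keys in IndexExpressionParseNodeRewriter (the constants 1231/1237 are just Boolean.hashCode()'s values for true/false, as emitted by IDE-generated code). A minimal self-contained sketch of the contract being relied on, with an invented stand-in Node class:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Objects;

    public class ParseNodeMapSketch {
        // Two nodes built independently from the same source text must compare
        // equal, or the rewriter's map lookup could never match a node parsed
        // from the incoming query.
        static final class Node {
            final String text;
            final boolean negate;
            Node(String text, boolean negate) { this.text = text; this.negate = negate; }
            @Override public int hashCode() { return Objects.hash(text, negate); }
            @Override public boolean equals(Object o) {
                if (this == o) return true;
                if (!(o instanceof Node)) return false;
                Node other = (Node) o;
                return negate == other.negate && Objects.equals(text, other.text);
            }
        }

        public static void main(String[] args) {
            Map<Node, Node> rewrite = new HashMap<Node, Node>();
            rewrite.put(new Node("UPPER(name)", false), new Node("IDX_COL", false));
            // A structurally identical node, freshly constructed, finds the mapping:
            System.out.println(rewrite.containsKey(new Node("UPPER(name)", false))); // true
        }
    }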

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java
index 75dfa90..5f649de 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java
@@ -56,4 +56,26 @@ public class BindParseNode extends NamedParseNode {
         return ":" + index;
     }
 
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + index;
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		BindParseNode other = (BindParseNode) obj;
+		if (index != other.index)
+			return false;
+		return true;
+	}
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java
index ea4e587..598a190 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java
@@ -104,4 +104,42 @@ public class CastParseNode extends UnaryParseNode {
             throw TypeMismatchException.newException(fromDataType, targetDataType, firstChildExpr.toString());
 	    }
 	}
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + ((dt == null) ? 0 : dt.hashCode());
+		result = prime * result
+				+ ((maxLength == null) ? 0 : maxLength.hashCode());
+		result = prime * result + ((scale == null) ? 0 : scale.hashCode());
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		CastParseNode other = (CastParseNode) obj;
+		if (dt == null) {
+			if (other.dt != null)
+				return false;
+		} else if (!dt.equals(other.dt))
+			return false;
+		if (maxLength == null) {
+			if (other.maxLength != null)
+				return false;
+		} else if (!maxLength.equals(other.maxLength))
+			return false;
+		if (scale == null) {
+			if (other.scale != null)
+				return false;
+		} else if (!scale.equals(other.scale))
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
index 169754c..8032ba5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
@@ -36,7 +36,7 @@ import com.google.common.base.Preconditions;
 /**
  * 
  * Represents a column definition during DDL
- *
+ * 
  * 
  * @since 0.1
  */
@@ -50,9 +50,10 @@ public class ColumnDef {
     private final SortOrder sortOrder;
     private final boolean isArray;
     private final Integer arrSize;
+    private final String expressionStr;
  
     ColumnDef(ColumnName columnDefName, String sqlTypeName, boolean isArray, Integer arrSize, Boolean isNull, Integer maxLength,
-    		            Integer scale, boolean isPK, SortOrder sortOrder) {
+    		            Integer scale, boolean isPK, SortOrder sortOrder, String expressionStr) {
    	 try {
          Preconditions.checkNotNull(sortOrder);
    	     PDataType localType = null;
@@ -133,13 +134,14 @@ public class ColumnDef {
          if(this.isArray) {
              this.dataType = localType;
          }
+         this.expressionStr = expressionStr;
      } catch (SQLException e) {
          throw new ParseException(e);
      }
     }
     ColumnDef(ColumnName columnDefName, String sqlTypeName, Boolean isNull, Integer maxLength,
-            Integer scale, boolean isPK, SortOrder sortOrder) {
-    	this(columnDefName, sqlTypeName, false, 0, isNull, maxLength, scale, isPK, sortOrder);
+            Integer scale, boolean isPK, SortOrder sortOrder, String expressionStr) {
+    	this(columnDefName, sqlTypeName, false, 0, isNull, maxLength, scale, isPK, sortOrder, expressionStr);
     }
 
     public ColumnName getColumnDefName() {
@@ -183,4 +185,8 @@ public class ColumnDef {
 	public Integer getArraySize() {
 		return arrSize;
 	}
+
+	public String getExpression() {
+		return expressionStr;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnName.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnName.java
index f613a05..82439ec 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnName.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnName.java
@@ -73,7 +73,7 @@ public class ColumnName {
 
     @Override
     public String toString() {
-        return SchemaUtil.getColumnName(getFamilyName(),getColumnName());
+		return SchemaUtil.getColumnName(getFamilyName(),getColumnName());
     }
     
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
index 19dbc68..e7489fd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
@@ -28,6 +28,7 @@ import org.apache.phoenix.query.QueryConstants;
  * @since 0.1
  */
 public class ColumnParseNode extends NamedParseNode {
+    // table name can also represent a column family 
     private final TableName tableName;
     private final String fullName;
     private final String alias;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java
index 053a9cc..e0ab22b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java
@@ -31,7 +31,8 @@ import java.util.List;
  * @since 0.1
  */
 public abstract class CompoundParseNode extends ParseNode {
-    private final List<ParseNode> children;
+
+	private final List<ParseNode> children;
     private final boolean isStateless;
     
     CompoundParseNode(List<ParseNode> children) {
@@ -70,4 +71,33 @@ public abstract class CompoundParseNode extends ParseNode {
     public String toString() {
         return this.getClass().getName() + children.toString();
     }
+    
+    @Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result
+				+ ((children == null) ? 0 : children.hashCode());
+		result = prime * result + (isStateless ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		CompoundParseNode other = (CompoundParseNode) obj;
+		if (children == null) {
+			if (other.children != null)
+				return false;
+		} else if (!children.equals(other.children))
+			return false;
+		if (isStateless != other.isStateless)
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
index 669dc3f..bf76174 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
@@ -29,7 +29,7 @@ import com.google.common.collect.ListMultimap;
 
 public class CreateIndexStatement extends SingleTableStatement {
     private final TableName indexTableName;
-    private final PrimaryKeyConstraint indexConstraint;
+    private final IndexKeyConstraint indexKeyConstraint;
     private final List<ColumnName> includeColumns;
     private final List<ParseNode> splitNodes;
     private final ListMultimap<String,Pair<String,Object>> props;
@@ -37,11 +37,11 @@ public class CreateIndexStatement extends SingleTableStatement {
     private final IndexType indexType;
 
     public CreateIndexStatement(NamedNode indexTableName, NamedTableNode dataTable, 
-            PrimaryKeyConstraint indexConstraint, List<ColumnName> includeColumns, List<ParseNode> splits,
+            IndexKeyConstraint indexKeyConstraint, List<ColumnName> includeColumns, List<ParseNode> splits,
             ListMultimap<String,Pair<String,Object>> props, boolean ifNotExists, IndexType indexType, int bindCount) {
         super(dataTable, bindCount);
         this.indexTableName =TableName.create(dataTable.getName().getSchemaName(),indexTableName.getName());
-        this.indexConstraint = indexConstraint == null ? PrimaryKeyConstraint.EMPTY : indexConstraint;
+        this.indexKeyConstraint = indexKeyConstraint == null ? IndexKeyConstraint.EMPTY : indexKeyConstraint;
         this.includeColumns = includeColumns == null ? Collections.<ColumnName>emptyList() : includeColumns;
         this.splitNodes = splits == null ? Collections.<ParseNode>emptyList() : splits;
         this.props = props == null ? ArrayListMultimap.<String,Pair<String,Object>>create() : props;
@@ -49,8 +49,8 @@ public class CreateIndexStatement extends SingleTableStatement {
         this.indexType = indexType;
     }
 
-    public PrimaryKeyConstraint getIndexConstraint() {
-        return indexConstraint;
+    public IndexKeyConstraint getIndexConstraint() {
+        return indexKeyConstraint;
     }
 
     public List<ColumnName> getIncludeColumns() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java
index 45ccdfe..fde7d76 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java
@@ -50,4 +50,26 @@ public class ExistsParseNode extends UnaryParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + (negate ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		ExistsParseNode other = (ExistsParseNode) obj;
+		if (negate != other.negate)
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java
index 9cfb345..2c939fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java
@@ -49,5 +49,27 @@ public class FamilyWildcardParseNode extends NamedParseNode {
     public boolean isRewrite() {
         return isRewrite;
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + (isRewrite ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		FamilyWildcardParseNode other = (FamilyWildcardParseNode) obj;
+		if (isRewrite != other.isRewrite)
+			return false;
+		return true;
+	}
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
index e6ce6d1..c41fa4f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
@@ -428,4 +428,35 @@ public class FunctionParseNode extends CompoundParseNode {
             return allowedValues;
         }
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + ((info == null) ? 0 : info.hashCode());
+		result = prime * result + ((name == null) ? 0 : name.hashCode());
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		FunctionParseNode other = (FunctionParseNode) obj;
+		if (info == null) {
+			if (other.info != null)
+				return false;
+		} else if (!info.equals(other.info))
+			return false;
+		if (name == null) {
+			if (other.name != null)
+				return false;
+		} else if (!name.equals(other.name))
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java
index 91f2b5c..fae15f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java
@@ -61,4 +61,26 @@ public class InListParseNode extends CompoundParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + (negate ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		InListParseNode other = (InListParseNode) obj;
+		if (negate != other.negate)
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java
index acd71b1..84984e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java
@@ -56,4 +56,29 @@ public class InParseNode extends BinaryParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + (isSubqueryDistinct ? 1231 : 1237);
+		result = prime * result + (negate ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		InParseNode other = (InParseNode) obj;
+		if (isSubqueryDistinct != other.isSubqueryDistinct)
+			return false;
+		if (negate != other.negate)
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
new file mode 100644
index 0000000..efa3835
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.phoenix.compile.ColumnResolver;
+import org.apache.phoenix.compile.ExpressionCompiler;
+import org.apache.phoenix.compile.FromCompiler;
+import org.apache.phoenix.compile.IndexStatementRewriter;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.util.IndexUtil;
+
+import com.google.common.collect.Maps;
+
+/**
+ * Used to replace parse nodes in a SelectStatement that match expressions that are present in an index with the
+ * corresponding {@link ColumnParseNode}
+ */
+public class IndexExpressionParseNodeRewriter extends ParseNodeRewriter {
+
+    private final Map<ParseNode, ParseNode> indexedParseNodeToColumnParseNodeMap;
+    
+    private static class ColumnParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor {
+        
+        private boolean isParseNodeCaseSensitive;
+        
+        public void reset() {
+            this.isParseNodeCaseSensitive = false;
+        }
+        
+        @Override
+        public Void visit(ColumnParseNode node) throws SQLException {
+            isParseNodeCaseSensitive = isParseNodeCaseSensitive  || node.isCaseSensitive() || node.isTableNameCaseSensitive();
+            return null;
+        }
+        
+        public boolean isParseNodeCaseSensitive() {
+            return isParseNodeCaseSensitive;
+        }
+        
+    }
+
+    public IndexExpressionParseNodeRewriter(PTable index, PhoenixConnection connection) throws SQLException {
+        indexedParseNodeToColumnParseNodeMap = Maps.newHashMapWithExpectedSize(index.getColumns().size());
+        NamedTableNode tableNode = NamedTableNode.create(null,
+                TableName.create(index.getParentSchemaName().getString(), index.getParentTableName().getString()),
+                Collections.<ColumnDef> emptyList());
+        ColumnResolver dataResolver = FromCompiler.getResolver(tableNode, connection);
+        StatementContext context = new StatementContext(new PhoenixStatement(connection), dataResolver);
+        IndexStatementRewriter rewriter = new IndexStatementRewriter(dataResolver, null);
+        ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
+        ColumnParseNodeVisitor columnParseNodeVisitor = new ColumnParseNodeVisitor();
+        int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (index.isMultiTenant() ? 1 : 0) + (index.getViewIndexId() == null ? 0 : 1);
+        List<PColumn> pkColumns = index.getPKColumns();
+		for (int i=indexPosOffset; i<pkColumns.size(); ++i) {
+        	PColumn column = pkColumns.get(i);
+            if (column.getExpressionStr()==null) {
+                continue;
+            }
+            ParseNode expressionParseNode = SQLParser.parseCondition(column.getExpressionStr());
+            columnParseNodeVisitor.reset();
+            expressionParseNode.accept(columnParseNodeVisitor);
+            String colName = column.getName().getString();
+            if (columnParseNodeVisitor.isParseNodeCaseSensitive()) {
+                // force the column name to be case sensitive by surrounding it with double quotes
+                colName = "\"" + colName + "\"";
+            }
+            
+            Expression dataExpression = expressionParseNode.accept(expressionCompiler);
+            PDataType expressionDataType = dataExpression.getDataType();
+            ParseNode indexedParseNode = expressionParseNode.accept(rewriter);
+            PDataType indexColType = IndexUtil.getIndexColumnDataType(dataExpression.isNullable(), expressionDataType);
+            ParseNode columnParseNode = new ColumnParseNode(null, colName, null);
+            if ( indexColType != expressionDataType) {
+                columnParseNode = NODE_FACTORY.cast(columnParseNode, expressionDataType, null, null);
+            }
+            indexedParseNodeToColumnParseNodeMap.put(indexedParseNode, columnParseNode);
+        }
+    }
+
+    @Override
+    protected ParseNode leaveCompoundNode(CompoundParseNode node, List<ParseNode> children, CompoundNodeFactory factory) {
+        return indexedParseNodeToColumnParseNodeMap.containsKey(node) ? indexedParseNodeToColumnParseNodeMap.get(node)
+                : super.leaveCompoundNode(node, children, factory);
+    }
+
+}
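
For orientation, this rewriter is driven from QueryOptimizer as shown in the hunk earlier in this commit; a hedged fragment wrapping that call (the method and class names here are invented):

    import java.sql.SQLException;
    import org.apache.phoenix.jdbc.PhoenixStatement;
    import org.apache.phoenix.parse.IndexExpressionParseNodeRewriter;
    import org.apache.phoenix.parse.ParseNodeRewriter;
    import org.apache.phoenix.parse.SelectStatement;
    import org.apache.phoenix.schema.PTable;

    final class IndexRewriteSketch {
        // Substitute indexed-expression subtrees with their index column
        // references before compiling the statement against the index table.
        static SelectStatement rewriteForIndex(SelectStatement indexSelect, PTable index,
                PhoenixStatement statement) throws SQLException {
            return ParseNodeRewriter.rewrite(indexSelect,
                    new IndexExpressionParseNodeRewriter(index, statement.getConnection()));
        }
    }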

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java
index 7043b9d..ef40c78 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java
@@ -17,21 +17,25 @@
  */
 package org.apache.phoenix.parse;
 
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.collect.ImmutableList;
+
 import org.apache.phoenix.schema.SortOrder;
 
 public class IndexKeyConstraint {
-    private final List<Pair<ColumnParseNode, SortOrder>> columnNameToSortOrder;
+	public static final IndexKeyConstraint EMPTY = new IndexKeyConstraint(Collections.<Pair<ParseNode, SortOrder>>emptyList());
+
+    private final List<Pair<ParseNode, SortOrder>> columnNameToSortOrder;
     
-    IndexKeyConstraint(List<Pair<ColumnParseNode, SortOrder>> columnNameAndSortOrder) {
-        this.columnNameToSortOrder = ImmutableList.copyOf(columnNameAndSortOrder);
+    IndexKeyConstraint(List<Pair<ParseNode, SortOrder>> parseNodeAndSortOrder) {
+        this.columnNameToSortOrder = ImmutableList.copyOf(parseNodeAndSortOrder);
     }
 
-    public List<Pair<ColumnParseNode, SortOrder>> getColumns() {
+    public List<Pair<ParseNode, SortOrder>> getParseNodeAndSortOrderList() {
         return columnNameToSortOrder;
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java
index 21d0f8e..614cfd0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java
@@ -50,4 +50,26 @@ public class IsNullParseNode extends UnaryParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + (negate ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		IsNullParseNode other = (IsNullParseNode) obj;
+		if (negate != other.negate)
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java
index 9cec70e..41d252d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java
@@ -59,4 +59,30 @@ public class LikeParseNode extends BinaryParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result
+				+ ((likeType == null) ? 0 : likeType.hashCode());
+		result = prime * result + (negate ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		LikeParseNode other = (LikeParseNode) obj;
+		if (likeType != other.likeType)
+			return false;
+		if (negate != other.negate)
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
index b83ce23..9e9184f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
@@ -80,4 +80,25 @@ public class LiteralParseNode extends TerminalParseNode {
     public String toString() {
         return type == PVarchar.INSTANCE ? ("'" + value.toString() + "'") : value == null ? "null" : value.toString();
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result + ((type == null) ? 0 : type.hashCode());
+		result = prime * result + ((value == null) ? 0 : value.hashCode());
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		LiteralParseNode other = (LiteralParseNode) obj;
+		return type.isComparableTo(other.type) && type.compareTo(value, other.value, other.type) == 0;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java
index e799875..6cfeb60 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java
@@ -28,7 +28,7 @@ public class NamedNode {
         return new NamedNode(name,true);
     }
     
-    private NamedNode(String name, boolean isCaseSensitive) {
+    NamedNode(String name, boolean isCaseSensitive) {
         this.name = name;
         this.isCaseSensitive = isCaseSensitive;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java
index fa4872f..51da80a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java
@@ -35,6 +35,10 @@ public abstract class NamedParseNode extends TerminalParseNode{
     NamedParseNode(String name) {
         this.namedNode = new NamedNode(name);
     }
+    
+    NamedParseNode(String name, boolean isCaseSensitive) {
+        this.namedNode = new NamedNode(name, isCaseSensitive);
+    }
 
     public String getName() {
         return namedNode.getName();
@@ -48,4 +52,30 @@ public abstract class NamedParseNode extends TerminalParseNode{
     public String toString() {
         return getName();
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result
+				+ ((namedNode == null) ? 0 : namedNode.hashCode());
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		NamedParseNode other = (NamedParseNode) obj;
+		if (namedNode == null) {
+			if (other.namedNode != null)
+				return false;
+		} else if (!namedNode.equals(other.namedNode))
+			return false;
+		return true;
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 0f40ece..57507b8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -53,16 +53,15 @@ import org.apache.phoenix.util.SchemaUtil;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Maps;
 
-
 /**
- *
+ * 
  * Factory used by parser to construct object model while parsing a SQL statement
- *
+ * 
  * 
  * @since 0.1
  */
 public class ParseNodeFactory {
-    private static final String ARRAY_ELEM = "ARRAY_ELEM";
+	private static final String ARRAY_ELEM = "ARRAY_ELEM";
 	// TODO: Use Google's Reflection library instead to find aggregate functions
     @SuppressWarnings("unchecked")
     private static final List<Class<? extends FunctionExpression>> CLIENT_SIDE_BUILT_IN_FUNCTIONS = Arrays.<Class<? extends FunctionExpression>>asList(
@@ -241,10 +240,10 @@ public class ParseNodeFactory {
         return new StringConcatParseNode(children);
     }
 
-    public ColumnParseNode column(TableName tableName, String name, String alias) {
-        return new ColumnParseNode(tableName,name,alias);
+    public ColumnParseNode column(TableName tableName, String columnName, String alias) {
+        return new ColumnParseNode(tableName, columnName, alias);
     }
-
+    
     public ColumnName columnName(String columnName) {
         return new ColumnName(columnName);
     }
@@ -261,25 +260,29 @@ public class ParseNodeFactory {
         return new PropertyName(familyName, propertyName);
     }
 
-    public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isNull, Integer maxLength, Integer scale, boolean isPK, SortOrder sortOrder) {
-        return new ColumnDef(columnDefName, sqlTypeName, isNull, maxLength, scale, isPK, sortOrder);
+    public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isNull, Integer maxLength, Integer scale, boolean isPK, SortOrder sortOrder, String expressionStr) {
+        return new ColumnDef(columnDefName, sqlTypeName, isNull, maxLength, scale, isPK, sortOrder, expressionStr);
     }
 
     public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isArray, Integer arrSize, Boolean isNull, Integer maxLength, Integer scale, boolean isPK, 
         	SortOrder sortOrder) {
-        return new ColumnDef(columnDefName, sqlTypeName, isArray, arrSize, isNull, maxLength, scale, isPK, sortOrder);
+        return new ColumnDef(columnDefName, sqlTypeName, isArray, arrSize, isNull, maxLength, scale, isPK, sortOrder, null);
     }
 
     public PrimaryKeyConstraint primaryKey(String name, List<Pair<ColumnName, SortOrder>> columnNameAndSortOrder) {
         return new PrimaryKeyConstraint(name, columnNameAndSortOrder);
     }
+    
+    public IndexKeyConstraint indexKey( List<Pair<ParseNode, SortOrder>> parseNodeAndSortOrder) {
+        return new IndexKeyConstraint(parseNodeAndSortOrder);
+    }
 
     public CreateTableStatement createTable(TableName tableName, ListMultimap<String,Pair<String,Object>> props, List<ColumnDef> columns, PrimaryKeyConstraint pkConstraint, List<ParseNode> splits, PTableType tableType, boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount) {
         return new CreateTableStatement(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, baseTableName, tableTypeIdNode, bindCount);
     }
 
-    public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, PrimaryKeyConstraint pkConstraint, List<ColumnName> includeColumns, List<ParseNode> splits, ListMultimap<String,Pair<String,Object>> props, boolean ifNotExists, IndexType indexType, int bindCount) {
-        return new CreateIndexStatement(indexName, dataTable, pkConstraint, includeColumns, splits, props, ifNotExists, indexType, bindCount);
+    public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, IndexKeyConstraint ikConstraint, List<ColumnName> includeColumns, List<ParseNode> splits, ListMultimap<String,Pair<String,Object>> props, boolean ifNotExists, IndexType indexType, int bindCount) {
+        return new CreateIndexStatement(indexName, dataTable, ikConstraint, includeColumns, splits, props, ifNotExists, indexType, bindCount);
     }
 
     public CreateSequenceStatement createSequence(TableName tableName, ParseNode startsWith,
@@ -599,7 +602,12 @@ public class ParseNodeFactory {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(), having,
                 statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
     }
-
+    
+    public SelectStatement select(SelectStatement statement, List<AliasedNode> select, ParseNode where, List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy) {
+        return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), 
+                select, where, groupBy, having, orderBy, statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+    }
+    
     public SelectStatement select(SelectStatement statement, TableNode table) {
         return select(table, statement.getHint(), statement.isDistinct(), statement.getSelect(), statement.getWhere(), statement.getGroupBy(),
                 statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java
index f29d79e..260584f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java
@@ -60,4 +60,33 @@ public class SequenceValueParseNode extends TerminalParseNode {
     public Op getOp() {
         return op;
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result + ((op == null) ? 0 : op.hashCode());
+		result = prime * result
+				+ ((tableName == null) ? 0 : tableName.hashCode());
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		SequenceValueParseNode other = (SequenceValueParseNode) obj;
+		if (op != other.op)
+			return false;
+		if (tableName == null) {
+			if (other.tableName != null)
+				return false;
+		} else if (!tableName.equals(other.tableName))
+			return false;
+		return true;
+	}
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java
index 92c5284..b7bcb64 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java
@@ -49,5 +49,33 @@ public class SubqueryParseNode extends TerminalParseNode {
     public <T> T accept(ParseNodeVisitor<T> visitor) throws SQLException {
         return visitor.visit(this);
     }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result + (expectSingleRow ? 1231 : 1237);
+		result = prime * result + ((select == null) ? 0 : select.hashCode());
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		SubqueryParseNode other = (SubqueryParseNode) obj;
+		if (expectSingleRow != other.expectSingleRow)
+			return false;
+		if (select == null) {
+			if (other.select != null)
+				return false;
+		} else if (!select.equals(other.select))
+			return false;
+		return true;
+	}
     
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
index 9717067..654e899 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
@@ -21,6 +21,7 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.SchemaUtil;
 
 public class TableName {
+
     private final String tableName;
     private final String schemaName;
     private final boolean isTableNameCaseSensitive;
@@ -61,7 +62,7 @@ public class TableName {
     public String toString() {
         return (schemaName == null ? "" : schemaName + QueryConstants.NAME_SEPARATOR)  + tableName;
     }
-
+    
 	@Override
 	public int hashCode() {
 		final int prime = 31;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java
index 768ba5d..7292347 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java
@@ -46,5 +46,34 @@ public class TableWildcardParseNode extends NamedParseNode {
         return visitor.visit(this);
     }
 
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = super.hashCode();
+		result = prime * result + (isRewrite ? 1231 : 1237);
+		result = prime * result
+				+ ((tableName == null) ? 0 : tableName.hashCode());
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (!super.equals(obj))
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		TableWildcardParseNode other = (TableWildcardParseNode) obj;
+		if (isRewrite != other.isRewrite)
+			return false;
+		if (tableName == null) {
+			if (other.tableName != null)
+				return false;
+		} else if (!tableName.equals(other.tableName))
+			return false;
+		return true;
+	}
+
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java
index 59feeb5..fdfb64f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java
@@ -51,6 +51,28 @@ public class WildcardParseNode extends TerminalParseNode {
 
     public boolean isRewrite() {
         return isRewrite;
-    }    
+    }
+
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result + (isRewrite ? 1231 : 1237);
+		return result;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		WildcardParseNode other = (WildcardParseNode) obj;
+		if (isRewrite != other.isRewrite)
+			return false;
+		return true;
+	}    
     
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
index be85635..6c6bcf7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
@@ -21,6 +21,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.util.SizedUtil;
 
 public class DelegateColumn extends DelegateDatum implements PColumn {
@@ -73,4 +74,9 @@ public class DelegateColumn extends DelegateDatum implements PColumn {
     public boolean isViewReferenced() {
         return getDelegate().isViewReferenced();
     }
+    
+    @Override
+    public String getExpressionStr() {
+        return getDelegate().getExpressionStr();
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
index 38aac31..b719aae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.stats.PTableStats;
 
 public class DelegateTable implements PTable {
@@ -161,13 +162,13 @@ public class DelegateTable implements PTable {
     }
 
     @Override
-    public void getIndexMaintainers(ImmutableBytesWritable ptr) {
-        delegate.getIndexMaintainers(ptr);
+    public void getIndexMaintainers(ImmutableBytesWritable ptr, PhoenixConnection connection) {
+        delegate.getIndexMaintainers(ptr, connection);
     }
 
     @Override
-    public IndexMaintainer getIndexMaintainer(PTable dataTable) {
-        return delegate.getIndexMaintainer(dataTable);
+    public IndexMaintainer getIndexMaintainer(PTable dataTable, PhoenixConnection connection) {
+        return delegate.getIndexMaintainer(dataTable, connection);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 5791c82..09d2f66 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -24,6 +24,7 @@ import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
 import static org.apache.phoenix.exception.SQLExceptionCode.INSUFFICIENT_MULTI_TENANT_COLUMNS;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
@@ -102,11 +103,13 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.ExplainPlan;
 import org.apache.phoenix.compile.FromCompiler;
+import org.apache.phoenix.compile.IndexExpressionCompiler;
 import org.apache.phoenix.compile.MutationPlan;
 import org.apache.phoenix.compile.PostDDLCompiler;
 import org.apache.phoenix.compile.PostIndexDDLCompiler;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.compile.StatementNormalizer;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
@@ -114,6 +117,9 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.expression.Determinism;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.RowKeyColumnExpression;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -131,7 +137,9 @@ import org.apache.phoenix.parse.DropColumnStatement;
 import org.apache.phoenix.parse.DropIndexStatement;
 import org.apache.phoenix.parse.DropSequenceStatement;
 import org.apache.phoenix.parse.DropTableStatement;
+import org.apache.phoenix.parse.IndexKeyConstraint;
 import org.apache.phoenix.parse.NamedTableNode;
+import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.parse.PrimaryKeyConstraint;
 import org.apache.phoenix.parse.TableName;
@@ -257,8 +265,9 @@ public class MetaDataClient {
         VIEW_CONSTANT + "," +
         IS_VIEW_REFERENCED + "," +
         PK_NAME + "," +  // write this both in the column and table rows for access by metadata APIs
-        KEY_SEQ +
-        ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+        KEY_SEQ + "," +
+        COLUMN_DEF +
+        ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
     private static final String UPDATE_COLUMN_POSITION =
         "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\" ( " +
         TENANT_ID + "," +
@@ -322,6 +331,7 @@ public class MetaDataClient {
             table = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
             tableTimestamp = table.getTimeStamp();
         } catch (TableNotFoundException e) {
+            // expected when the table is not in the client-side cache; ignore
             // TODO: Try again on services cache, as we may be looking for
             // a global multi-tenant table
         }
@@ -499,6 +509,11 @@ public class MetaDataClient {
         } else {
             colUpsert.setShort(17, keySeq);
         }
+        if (column.getExpressionStr() == null) {
+            colUpsert.setNull(18, Types.VARCHAR);
+        } else {
+            colUpsert.setString(18, column.getExpressionStr());
+        }
         colUpsert.execute();
     }
 
@@ -545,7 +560,7 @@ public class MetaDataClient {
             }
 
             PColumn column = new PColumnImpl(PNameFactory.newName(columnName), familyName, def.getDataType(),
-                    def.getMaxLength(), def.getScale(), isNull, position, sortOrder, def.getArraySize(), null, false);
+                    def.getMaxLength(), def.getScale(), isNull, position, sortOrder, def.getArraySize(), null, false, def.getExpression());
             return column;
         } catch (IllegalArgumentException e) { // Based on precondition check in constructor
             throw new SQLException(e);
@@ -762,11 +777,11 @@ public class MetaDataClient {
                 List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
                 // Only build newly created index.
                 indexes.add(index);
-                IndexMaintainer.serialize(dataTable, ptr, indexes);
+                IndexMaintainer.serialize(dataTable, ptr, indexes, plan.getContext().getConnection());
                 scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD, ByteUtil.copyKeyBytesIfNecessary(ptr));
                 // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*).
                 // However, in this case, we need to project all of the data columns that contribute to the index.
-                IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable);
+                IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection);
                 for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
                     scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());
                 }
@@ -884,10 +899,10 @@ public class MetaDataClient {
      * @throws SQLException
      */
     public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) throws SQLException {
-        PrimaryKeyConstraint pk = statement.getIndexConstraint();
+        IndexKeyConstraint ik = statement.getIndexConstraint();
         TableName indexTableName = statement.getIndexTableName();
-
-        List<Pair<ColumnName, SortOrder>> indexedPkColumns = pk.getColumnNames();
+        
+        List<Pair<ParseNode, SortOrder>> indexParseNodeAndSortOrderList = ik.getParseNodeAndSortOrderList();
         List<ColumnName> includedColumns = statement.getIncludeColumns();
         TableRef tableRef = null;
         PTable table = null;
@@ -915,24 +930,30 @@ public class MetaDataClient {
                     }
                 }
                 int posOffset = 0;
-                Set<PColumn> unusedPkColumns;
+                List<PColumn> pkColumns = dataTable.getPKColumns();
+                Set<RowKeyColumnExpression> unusedPkColumns;
                 if (dataTable.getBucketNum() != null) { // Ignore SALT column
-                    unusedPkColumns = new LinkedHashSet<PColumn>(dataTable.getPKColumns().subList(1, dataTable.getPKColumns().size()));
-                    posOffset++;
+                    unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size()-1);
+                    posOffset++;
                 } else {
-                    unusedPkColumns = new LinkedHashSet<PColumn>(dataTable.getPKColumns());
+                    unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
+                }
+                for (int i = posOffset; i < pkColumns.size(); i++) {
+                    PColumn column = pkColumns.get(i);
+                    unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i), "\"" + column.getName().getString() + "\""));
                 }
                 List<Pair<ColumnName, SortOrder>> allPkColumns = Lists.newArrayListWithExpectedSize(unusedPkColumns.size());
-                List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexedPkColumns.size());
-
+                List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexParseNodeAndSortOrderList.size());
+                
                 if (dataTable.isMultiTenant()) {
                     // Add tenant ID column as first column in index
                     PColumn col = dataTable.getPKColumns().get(posOffset);
-                    unusedPkColumns.remove(col);
+                    RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col, new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString());
+                    unusedPkColumns.remove(columnExpression);
                     PDataType dataType = IndexUtil.getIndexColumnDataType(col);
                     ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                     allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, col.getSortOrder()));
-                    columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault()));
+                    columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault(), col.getName().getString()));
                 }
                 /*
                  * Allocate an index ID in two circumstances:
@@ -945,55 +966,81 @@ public class MetaDataClient {
                     PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
                     ColumnName colName = ColumnName.caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName());
                     allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, SortOrder.getDefault()));
-                    columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false, SortOrder.getDefault()));
+                    columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false, SortOrder.getDefault(), null));
                 }
-                // First columns are the indexed ones
-                for (Pair<ColumnName, SortOrder> pair : indexedPkColumns) {
-                    ColumnName colName = pair.getFirst();
-                    PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
-                    unusedPkColumns.remove(col);
-                    // Ignore view constants for updatable views as we don't need these in the index
-                    if (col.getViewConstant() == null) {
-                        PDataType dataType = IndexUtil.getIndexColumnDataType(col);
-                        colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
-                        allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, pair.getSecond()));
-                        columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault()));
-                    }
+
+                PhoenixStatement phoenixStatement = new PhoenixStatement(connection);
+                StatementContext context = new StatementContext(phoenixStatement, resolver);
+                IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
+                Set<ColumnName> indexedColumnNames = Sets.newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size());
+                for (Pair<ParseNode, SortOrder> pair : indexParseNodeAndSortOrderList) {
+                    ParseNode parseNode = pair.getFirst();
+                    // normalize the parse node
+                    parseNode = StatementNormalizer.normalize(parseNode, resolver);
+                    // compile the parse node to get an expression
+                    expressionIndexCompiler.reset();
+                    Expression expression = parseNode.accept(expressionIndexCompiler);
+                    if (expressionIndexCompiler.isAggregate()) {
+                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
+                    }
+                    if (expression.getDeterminism() != Determinism.ALWAYS) {
+                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
+                    }
+                    // true for any constant (including a view constant), as we don't need these in the index
+                    if (expression.isStateless()) {
+                        continue;
+                    }
+                    unusedPkColumns.remove(expression);
+
+                    ColumnName colName = null;
+                    ColumnRef colRef = expressionIndexCompiler.getColumnRef();
+                    if (colRef != null) {
+                        // this is a regular column
+                        PColumn column = colRef.getColumn();
+                        String columnFamilyName = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
+                        colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString()));
+                    }
+                    else {
+                        // this is an expression
+                        // TODO column names cannot contain double quotes; remove this once PHOENIX-1621 is fixed
+                        String name = expression.toString().replaceAll("\"", "'");
+                        colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name));
+                    }
+                    indexedColumnNames.add(colName);
+                    PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
+                    allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, pair.getSecond()));
+                    columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expression.toString()));
                 }
 
                 // Next all the PK columns from the data table that aren't indexed
                 if (!unusedPkColumns.isEmpty()) {
-                    for (PColumn col : unusedPkColumns) {
+                    for (RowKeyColumnExpression colExpression : unusedPkColumns) {
+                        PColumn col = dataTable.getPKColumns().get(colExpression.getPosition());
                         // Don't add columns with constant values from updatable views, as
                         // we don't need these in the index
                         if (col.getViewConstant() == null) {
                             ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
-                            allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, col.getSortOrder()));
-                            PDataType dataType = IndexUtil.getIndexColumnDataType(col);
-                            columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder()));
+                            allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, colExpression.getSortOrder()));
+                            PDataType dataType = IndexUtil.getIndexColumnDataType(colExpression.isNullable(), colExpression.getDataType());
+                            columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(),
+                                    colExpression.isNullable(), colExpression.getMaxLength(), colExpression.getScale(),
+                                    false, colExpression.getSortOrder(), colExpression.toString()));
                         }
                     }
                 }
-                pk = FACTORY.primaryKey(null, allPkColumns);
-
+                
                 // Last all the included columns (minus any PK columns)
                 for (ColumnName colName : includedColumns) {
                     PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
-                    if (SchemaUtil.isPKColumn(col)) {
-                        if (!unusedPkColumns.contains(col)) {
-                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
-                        }
-                    } else {
-                        colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
-                        // Check for duplicates between indexed and included columns
-                        if (pk.contains(colName)) {
-                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
-                        }
-                        if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) {
-                            // Need to re-create ColumnName, since the above one won't have the column family name
-                            colName = ColumnName.caseSensitiveColumnName(col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col));
-                            columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder()));
-                        }
+                    colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
+                    // Check for duplicates between indexed and included columns
+                    if (indexedColumnNames.contains(colName)) {
+                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
+                    }
+                    if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) {
+                        // Need to re-create ColumnName, since the above one won't have the column family name
+                        colName = ColumnName.caseSensitiveColumnName(col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col));
+                        columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder(), null));
                     }
                 }
 
@@ -1030,6 +1077,7 @@ public class MetaDataClient {
                 if (dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW && indexId == null) {
                     statement.getProps().put("", new Pair<String,Object>(DEFAULT_COLUMN_FAMILY_NAME,dataTable.getDefaultFamilyName().getString()));
                 }
+                PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
                 CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, null, statement.getBindCount());
                 table = createTableInternal(tableStatement, splits, dataTable, null, null, null, null, indexId, statement.getIndexType());
                 break;
@@ -2051,7 +2099,7 @@ public class MetaDataClient {
                     }
                 }
 
-                boolean isAddingPKColumn = false;
+                int numPkColumnsAdded = 0;
                 PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
 
                 List<PColumn> columns = Lists.newArrayListWithExpectedSize(columnDefs.size());
@@ -2077,7 +2125,7 @@ public class MetaDataClient {
 
                         // TODO: support setting properties on other families?
                         if (column.getFamilyName() == null) {
-                            isAddingPKColumn = true;
+                            ++numPkColumnsAdded;
                             pkName = table.getPKName() == null ? null : table.getPKName().getString();
                             keySeq = ++nextKeySeq;
                         } else {
@@ -2088,15 +2136,26 @@ public class MetaDataClient {
                     }
 
                     // Add any new PK columns to end of index PK
-                    if (isAddingPKColumn) {
+                    if (numPkColumnsAdded > 0) {
+                        // create a PK column list that includes the newly created columns
+                        List<PColumn> pkColumns = Lists.newArrayListWithExpectedSize(table.getPKColumns().size() + numPkColumnsAdded);
+                        pkColumns.addAll(table.getPKColumns());
+                        for (int i = 0; i < columnDefs.size(); ++i) {
+                            if (columnDefs.get(i).isPK()) {
+                                pkColumns.add(columns.get(i));
+                            }
+                        }
+                        int pkSlotPosition = table.getPKColumns().size() - 1;
                        for (PTable index : table.getIndexes()) {
                            short nextIndexKeySeq = SchemaUtil.getMaxKeySeq(index);
                            int indexPosition = index.getColumns().size();
-                            for (ColumnDef colDef : columnDefs) {
+                            for (int i = 0; i < columnDefs.size(); ++i) {
+                                ColumnDef colDef = columnDefs.get(i);
                                 if (colDef.isPK()) {
                                     PDataType indexColDataType = IndexUtil.getIndexColumnDataType(colDef.isNull(), colDef.getDataType());
                                     ColumnName indexColName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, colDef.getColumnDefName().getColumnName()));
-                                    ColumnDef indexColDef = FACTORY.columnDef(indexColName, indexColDataType.getSqlTypeName(), colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), true, colDef.getSortOrder());
+                                    Expression expression = new RowKeyColumnExpression(columns.get(i), new RowKeyValueAccessor(pkColumns, ++pkSlotPosition));
+                                    ColumnDef indexColDef = FACTORY.columnDef(indexColName, indexColDataType.getSqlTypeName(), colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), true, colDef.getSortOrder(), expression.toString());
                                     PColumn indexColumn = newColumn(indexPosition++, indexColDef, PrimaryKeyConstraint.EMPTY, null, true);
                                     addColumnMutation(schemaName, index.getTableName().getString(), indexColumn, colUpsert, index.getParentTableName().getString(), index.getPKName() == null ? null : index.getPKName().getString(), ++nextIndexKeySeq, index.getBucketNum() != null);
                                 }
@@ -2124,7 +2183,7 @@ public class MetaDataClient {
                     }
                 }
 
-                if (isAddingPKColumn && !table.getIndexes().isEmpty()) {
+                if (numPkColumnsAdded>0 && !table.getIndexes().isEmpty()) {
                     for (PTable index : table.getIndexes()) {
                         incrementTableSeqNum(index, index.getType(), 1);
                     }
@@ -2172,7 +2231,7 @@ public class MetaDataClient {
 
                     // Only update client side cache if we aren't adding a PK column to a table with indexes.
                     // We could update the cache manually then too, it'd just be a pain.
-                    if (!isAddingPKColumn || table.getIndexes().isEmpty()) {
+                    if (numPkColumnsAdded==0 || table.getIndexes().isEmpty()) {
                         connection.addColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), columns, result.getMutationTime(), seqNum, isImmutableRows == null ? table.isImmutableRows() : isImmutableRows, disableWAL == null ? table.isWALDisabled() : disableWAL, multiTenant == null ? table.isMultiTenant() : multiTenant, storeNulls == null ? table.getStoreNulls() : storeNulls);
                     }
                     // Delete rows in view index if we haven't dropped it already

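To make the new createIndex() path concrete, here is a minimal sketch of the DDL it
enables. The table, column, and index names are hypothetical; only the syntax and
the two rejection cases come from the code above:

    CREATE TABLE t (k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 BIGINT);

    -- Index keyed on an expression rather than a plain column; the expression
    -- string is persisted in COLUMN_DEF for each derived index column.
    CREATE INDEX upper_v1_idx ON t (UPPER(v1)) INCLUDE (v2);

    -- Should be served from the index, since the WHERE clause matches the
    -- indexed expression.
    SELECT v2 FROM t WHERE UPPER(v1) = 'FOO';

    -- Rejected with AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX:
    CREATE INDEX bad_idx ON t (COUNT(v1));
    -- Any expression whose determinism is not ALWAYS is likewise rejected
    -- with NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX.
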
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
index 54eeaf0..fbc737c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.schema;
 
+
 /**
  * Definition of a Phoenix column
  *
@@ -50,4 +51,6 @@ public interface PColumn extends PDatum {
     boolean isViewReferenced();
     
     int getEstimatedSize();
+    
+    String getExpressionStr();
 }

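The string returned by the new getExpressionStr() is what MetaDataClient writes to
the COLUMN_DEF column of SYSTEM.CATALOG (see the diff above), so for a hypothetical
expression index UPPER_V1_IDX it could be inspected after the fact with something
like:

    SELECT COLUMN_NAME, COLUMN_DEF
    FROM SYSTEM."CATALOG"
    WHERE TABLE_NAME = 'UPPER_V1_IDX' AND COLUMN_DEF IS NOT NULL;

COLUMN_DEF stays NULL for ordinary included columns, per the null check in
addColumnMutation above.
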
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
index 47963c2..11cc53d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
@@ -37,6 +37,7 @@ public class PColumnImpl implements PColumn {
     private Integer arraySize;
     private byte[] viewConstant;
     private boolean isViewReferenced;
+    private String expressionStr;
 
     public PColumnImpl() {
     }
@@ -48,13 +49,13 @@ public class PColumnImpl implements PColumn {
                        Integer scale,
                        boolean nullable,
                        int position,
-                       SortOrder sortOrder, Integer arrSize, byte[] viewConstant, boolean isViewReferenced) {
-        init(name, familyName, dataType, maxLength, scale, nullable, position, sortOrder, arrSize, viewConstant, isViewReferenced);
+                       SortOrder sortOrder, Integer arrSize, byte[] viewConstant, boolean isViewReferenced, String expressionStr) {
+        init(name, familyName, dataType, maxLength, scale, nullable, position, sortOrder, arrSize, viewConstant, isViewReferenced, expressionStr);
     }
 
     public PColumnImpl(PColumn column, int position) {
         this(column.getName(), column.getFamilyName(), column.getDataType(), column.getMaxLength(),
-                column.getScale(), column.isNullable(), position, column.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced());
+                column.getScale(), column.isNullable(), position, column.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr());
     }
 
     private void init(PName name,
@@ -66,7 +67,7 @@ public class PColumnImpl implements PColumn {
             int position,
             SortOrder sortOrder,
             Integer arrSize,
-            byte[] viewConstant, boolean isViewReferenced) {
+            byte[] viewConstant, boolean isViewReferenced, String expressionStr) {
     	Preconditions.checkNotNull(sortOrder);
         this.dataType = dataType;
         if (familyName == null) {
@@ -88,6 +89,7 @@ public class PColumnImpl implements PColumn {
         this.arraySize = arrSize;
         this.viewConstant = viewConstant;
         this.isViewReferenced = isViewReferenced;
+        this.expressionStr = expressionStr;
     }
 
     @Override
@@ -121,6 +123,11 @@ public class PColumnImpl implements PColumn {
     public Integer getScale() {
         return scale;
     }
+    
+    @Override
+    public String getExpressionStr() {
+        return expressionStr;
+    }
 
     @Override
     public boolean isNullable() {
@@ -221,9 +228,12 @@ public class PColumnImpl implements PColumn {
         if (column.hasViewReferenced()) {
             isViewReferenced = column.getViewReferenced();
         }
-
+        String expressionStr = null;
+        if (column.hasExpression()) {
+            expressionStr = column.getExpression();
+        }
         return new PColumnImpl(columnName, familyName, dataType, maxLength, scale, nullable, position, sortOrder,
-                arraySize, viewConstant, isViewReferenced);
+                arraySize, viewConstant, isViewReferenced, expressionStr);
     }
 
     public static PTableProtos.PColumn toProto(PColumn column) {
@@ -249,6 +259,10 @@ public class PColumnImpl implements PColumn {
             builder.setViewConstant(HBaseZeroCopyByteString.wrap(column.getViewConstant()));
         }
         builder.setViewReferenced(column.isViewReferenced());
+        
+        if (column.getExpressionStr() != null) {
+            builder.setExpression(column.getExpressionStr());
+        }
         return builder.build();
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index d3f4273..2f84c95 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -393,7 +393,7 @@ public class PMetaDataImpl implements PMetaData {
             // Update position of columns that follow removed column
             for (int i = position+1; i < oldColumns.size(); i++) {
                 PColumn oldColumn = oldColumns.get(i);
-                PColumn newColumn = new PColumnImpl(oldColumn.getName(), oldColumn.getFamilyName(), oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), oldColumn.isNullable(), i-1+positionOffset, oldColumn.getSortOrder(), oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced());
+                PColumn newColumn = new PColumnImpl(oldColumn.getName(), oldColumn.getFamilyName(), oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), oldColumn.isNullable(), i-1+positionOffset, oldColumn.getSortOrder(), oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced(), null);
                 columns.add(newColumn);
             }
             

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index ee4bebc..d0fea88 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.stats.PTableStats;
 
 
@@ -208,7 +209,7 @@ public interface PTable {
      * @throws AmbiguousColumnException if multiple columns are found with the given name
      */
     PColumn getColumn(String name) throws ColumnNotFoundException, AmbiguousColumnException;
-
+    
     /**
      * Get the PK column with the given name.
      * @param name the column name
@@ -306,8 +307,8 @@ public interface PTable {
     PName getPhysicalName();
     boolean isImmutableRows();
 
-    void getIndexMaintainers(ImmutableBytesWritable ptr);
-    IndexMaintainer getIndexMaintainer(PTable dataTable);
+    void getIndexMaintainers(ImmutableBytesWritable ptr, PhoenixConnection connection);
+    IndexMaintainer getIndexMaintainer(PTable dataTable, PhoenixConnection connection);
     PName getDefaultFamilyName();
 
     boolean isWALDisabled();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index acce857..08f74b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -46,6 +46,7 @@ import org.apache.phoenix.coprocessor.generated.PTableProtos;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.protobuf.ProtobufUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
@@ -324,15 +325,16 @@ public class PTableImpl implements PTable {
         this.tableStats = stats;
         List<PColumn> pkColumns;
         PColumn[] allColumns;
-
+        
         this.columnsByName = ArrayListMultimap.create(columns.size(), 1);
+        int numPKColumns = 0;
         if (bucketNum != null) {
             // Add salt column to allColumns and pkColumns, but don't add to
             // columnsByName, since it should not be addressable via name.
             allColumns = new PColumn[columns.size()+1];
             allColumns[SALTING_COLUMN.getPosition()] = SALTING_COLUMN;
             pkColumns = Lists.newArrayListWithExpectedSize(columns.size()+1);
-            pkColumns.add(SALTING_COLUMN);
+            ++numPKColumns;
         } else {
             allColumns = new PColumn[columns.size()];
             pkColumns = Lists.newArrayListWithExpectedSize(columns.size());
@@ -342,7 +344,7 @@ public class PTableImpl implements PTable {
             allColumns[column.getPosition()] = column;
             PName familyName = column.getFamilyName();
             if (familyName == null) {
-                pkColumns.add(column);
+                ++numPKColumns;
             }
             String columnName = column.getName().getString();
             if (columnsByName.put(columnName, column)) {
@@ -360,19 +362,19 @@ public class PTableImpl implements PTable {
         estimatedSize += SizedUtil.sizeOfMap(allColumns.length, SizedUtil.POINTER_SIZE, SizedUtil.sizeOfArrayList(1)); // for multi-map
 
         this.bucketNum = bucketNum;
-        this.pkColumns = ImmutableList.copyOf(pkColumns);
         this.allColumns = ImmutableList.copyOf(allColumns);
-        estimatedSize += SizedUtil.sizeOfMap(pkColumns.size()) + SizedUtil.sizeOfMap(allColumns.length);
+        estimatedSize += SizedUtil.sizeOfMap(numPKColumns) + SizedUtil.sizeOfMap(allColumns.length);
 
-        RowKeySchemaBuilder builder = new RowKeySchemaBuilder(pkColumns.size());
+        RowKeySchemaBuilder builder = new RowKeySchemaBuilder(numPKColumns);
         // Two pass so that column order in column families matches overall column order
         // and to ensure that column family order is constant
-        int maxExpectedSize = allColumns.length - pkColumns.size();
+        int maxExpectedSize = allColumns.length - numPKColumns;
         // Maintain iteration order so that column families are ordered as they are listed
         Map<PName, List<PColumn>> familyMap = Maps.newLinkedHashMap();
         for (PColumn column : allColumns) {
             PName familyName = column.getFamilyName();
             if (familyName == null) {
+                pkColumns.add(column);
                 estimatedSize += column.getEstimatedSize(); // PK columns
                 builder.addField(column, column.isNullable(), column.getSortOrder());
             } else {
@@ -384,6 +386,7 @@ public class PTableImpl implements PTable {
                 columnsInFamily.add(column);
             }
         }
+        this.pkColumns = ImmutableList.copyOf(pkColumns);
         this.rowKeySchema = builder.build();
         estimatedSize += rowKeySchema.getEstimatedSize();
         Iterator<Map.Entry<PName,List<PColumn>>> iterator = familyMap.entrySet().iterator();
@@ -804,21 +809,21 @@ public class PTableImpl implements PTable {
     }
 
     @Override
-    public synchronized IndexMaintainer getIndexMaintainer(PTable dataTable) {
+    public synchronized IndexMaintainer getIndexMaintainer(PTable dataTable, PhoenixConnection connection) {
         if (indexMaintainer == null) {
-            indexMaintainer = IndexMaintainer.create(dataTable, this);
+            indexMaintainer = IndexMaintainer.create(dataTable, this, connection);
         }
         return indexMaintainer;
     }
 
     @Override
-    public synchronized void getIndexMaintainers(ImmutableBytesWritable ptr) {
+    public synchronized void getIndexMaintainers(ImmutableBytesWritable ptr, PhoenixConnection connection) {
         if (indexMaintainersPtr == null) {
             indexMaintainersPtr = new ImmutableBytesWritable();
             if (indexes.isEmpty()) {
                 indexMaintainersPtr.set(ByteUtil.EMPTY_BYTE_ARRAY);
             } else {
-                IndexMaintainer.serialize(this, indexMaintainersPtr);
+                IndexMaintainer.serialize(this, indexMaintainersPtr, connection);
             }
         }
         ptr.set(indexMaintainersPtr.get(), indexMaintainersPtr.getOffset(), indexMaintainersPtr.getLength());


[34/50] [abbrv] phoenix git commit: PHOENIX-1639 Enhance function/expression index tests

Posted by ma...@apache.org.
PHOENIX-1639 Enhance function/expression index tests


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2e5a6308
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2e5a6308
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2e5a6308

Branch: refs/heads/calcite
Commit: 2e5a63089f98c21e8556fae0ec80988079e3ca55
Parents: b8c0559
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Tue Feb 17 12:32:55 2015 -0800
Committer: Thomas D'Silva <tw...@gmail.com>
Committed: Tue Feb 17 12:32:55 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterTableIT.java    |  58 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java  |   4 +-
 .../java/org/apache/phoenix/end2end/ViewIT.java |   5 +
 .../end2end/index/IndexExpressionIT.java        | 714 +++++++++++++++----
 .../coprocessor/MetaDataEndpointImpl.java       |   5 +-
 .../phoenix/exception/SQLExceptionCode.java     |   5 +-
 .../apache/phoenix/index/IndexMaintainer.java   |   4 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  33 +-
 .../phoenix/compile/QueryCompilerTest.java      |  72 ++
 9 files changed, 715 insertions(+), 185 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 7f5649b..59698d6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -308,7 +308,6 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
 
     @Test
     public void testDropCoveredColumn() throws Exception {
-        String query;
         ResultSet rs;
         PreparedStatement stmt;
 
@@ -320,19 +319,21 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
         conn.createStatement().execute(
           "CREATE TABLE " + DATA_TABLE_FULL_NAME
               + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR)");
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
+        String dataTableQuery = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
+        rs = conn.createStatement().executeQuery(dataTableQuery);
         assertFalse(rs.next());
 
         conn.createStatement().execute(
           "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) include (v2, v3)");
         conn.createStatement().execute(
             "CREATE LOCAL INDEX " + LOCAL_INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) include (v2, v3)");
-        query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
+        rs = conn.createStatement().executeQuery(dataTableQuery);
         assertFalse(rs.next());
-        query = "SELECT * FROM " + LOCAL_INDEX_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
+        String indexTableQuery = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
+        rs = conn.createStatement().executeQuery(indexTableQuery);
+        assertFalse(rs.next());
+        String localIndexTableQuery = "SELECT * FROM " + LOCAL_INDEX_TABLE_FULL_NAME;
+        rs = conn.createStatement().executeQuery(localIndexTableQuery);
         assertFalse(rs.next());
 
         // load some data into the table
@@ -346,16 +347,31 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
 
         assertIndexExists(conn,true);
         conn.createStatement().execute("ALTER TABLE " + DATA_TABLE_FULL_NAME + " DROP COLUMN v2");
-        // TODO: verify meta data that we get back to confirm our column was dropped
         assertIndexExists(conn,true);
 
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
+        // verify data table rows
+        rs = conn.createStatement().executeQuery(dataTableQuery);
         assertTrue(rs.next());
         assertEquals("a",rs.getString(1));
         assertEquals("x",rs.getString(2));
         assertEquals("j",rs.getString(3));
         assertFalse(rs.next());
+        
+        // verify index table rows
+        rs = conn.createStatement().executeQuery(indexTableQuery);
+        assertTrue(rs.next());
+        assertEquals("x",rs.getString(1));
+        assertEquals("a",rs.getString(2));
+        assertEquals("j",rs.getString(3));
+        assertFalse(rs.next());
+        
+        // verify local index table rows
+        rs = conn.createStatement().executeQuery(localIndexTableQuery);
+        assertTrue(rs.next());
+        assertEquals("x",rs.getString(1));
+        assertEquals("a",rs.getString(2));
+        assertEquals("j",rs.getString(3));
+        assertFalse(rs.next());
 
         // load some data into the table
         stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
@@ -365,13 +381,29 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
         stmt.execute();
         conn.commit();
 
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
+        // verify data table rows
+        rs = conn.createStatement().executeQuery(dataTableQuery);
         assertTrue(rs.next());
         assertEquals("a",rs.getString(1));
         assertEquals("y",rs.getString(2));
         assertEquals("k",rs.getString(3));
         assertFalse(rs.next());
+        
+        // verify index table rows
+        rs = conn.createStatement().executeQuery(indexTableQuery);
+        assertTrue(rs.next());
+        assertEquals("y",rs.getString(1));
+        assertEquals("a",rs.getString(2));
+        assertEquals("k",rs.getString(3));
+        assertFalse(rs.next());
+        
+        // verify local index table rows
+        rs = conn.createStatement().executeQuery(localIndexTableQuery);
+        assertTrue(rs.next());
+        assertEquals("y",rs.getString(1));
+        assertEquals("a",rs.getString(2));
+        assertEquals("k",rs.getString(3));
+        assertFalse(rs.next());
     }
 
     @Test
@@ -427,8 +459,6 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
         assertEquals(IndexUtil.INDEX_COLUMN_NAME_SEP + "K2",rs.getString("COLUMN_NAME"));
         assertEquals(3, rs.getShort("KEY_SEQ"));
 
-        assertIndexExists(conn,true);
-
         query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
         rs = conn.createStatement().executeQuery(query);
         assertTrue(rs.next());

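The scenario this enhanced test now verifies end to end, as a standalone sketch
(names are illustrative, mirroring the test):

    CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR);
    CREATE INDEX i ON t (v1) INCLUDE (v2, v3);
    CREATE LOCAL INDEX li ON t (v1) INCLUDE (v2, v3);

    -- Dropping a covered column must cascade to both the global and the
    -- local index; the test now reads all three tables back to confirm it.
    ALTER TABLE t DROP COLUMN v2;
    SELECT * FROM i;   -- each row now carries only (v1, k, v3)
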
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index dc8e768..19d011f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -130,7 +130,7 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         String queryPlan = QueryUtil.getExplainPlan(rs);
         if (localIndex) {
-            assertEquals("CLIENT PARALLEL 3-WAY RANGE SCAN OVER _LOCAL_IDX_T [-32768,51]\n"
+            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets)  +"-WAY RANGE SCAN OVER _LOCAL_IDX_T [-32768,51]\n"
                     + "    SERVER FILTER BY FIRST KEY ONLY\n"
                     + "CLIENT MERGE SORT",
                 queryPlan);
@@ -166,7 +166,7 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
         assertFalse(rs.next());
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         if (localIndex) {
-            assertEquals("CLIENT PARALLEL 3-WAY RANGE SCAN OVER _LOCAL_IDX_T [" + (Short.MIN_VALUE+1) + ",'foo']\n"
+            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets)  +"-WAY RANGE SCAN OVER _LOCAL_IDX_T [" + (Short.MIN_VALUE+1) + ",'foo']\n"
                     + "    SERVER FILTER BY FIRST KEY ONLY\n"
                     + "CLIENT MERGE SORT",QueryUtil.getExplainPlan(rs));
         } else {

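The assertion change above encodes that a local index scan fans out per salt
bucket. As a sketch, for a hypothetical table T salted into 3 buckets with a local
index on v1, the plan would read:

    EXPLAIN SELECT k FROM T WHERE v1 = 'foo';
    -- CLIENT PARALLEL 3-WAY RANGE SCAN OVER _LOCAL_IDX_T [-32768,'foo']
    --     SERVER FILTER BY FIRST KEY ONLY
    -- CLIENT MERGE SORT
    -- (on an unsalted table the same plan is CLIENT PARALLEL 1-WAY)
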
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 003db4c..266438d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -109,6 +109,11 @@ public class ViewIT extends BaseViewIT {
     }
     
     @Test
+    public void testNonSaltedUpdatableViewWithLocalIndex() throws Exception {
+        testUpdatableViewWithIndex(null, true);
+    }
+    
+    @Test
     public void testUpdatableOnUpdatableView() throws Exception {
         testUpdatableView(null);
         Connection conn = DriverManager.getConnection(getUrl());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
index 28124b6..5c51bda 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -15,6 +15,7 @@ import static org.apache.phoenix.util.TestUtil.MUTABLE_INDEX_DATA_TABLE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -30,8 +31,10 @@ import java.util.Properties;
 import org.apache.commons.lang.StringUtils;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.CommitException;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.junit.Test;
@@ -41,22 +44,22 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
     private static final int NUM_MILLIS_IN_DAY = 86400000;
 
     @Test
-    public void testImmutableIndexCreationAndUpdate() throws Exception {
+    public void testImmutableIndexCreateAndUpdate() throws Exception {
         helpTestCreateAndUpdate(false, false);
     }
 
     @Test
-    public void testImmutableLocalIndexCreationAndUpdate() throws Exception {
+    public void testImmutableLocalIndexCreateAndUpdate() throws Exception {
         helpTestCreateAndUpdate(false, true);
     }
 
     @Test
-    public void testMutableIndexCreationAndUpdate() throws Exception {
+    public void testMutableIndexCreateAndUpdate() throws Exception {
         helpTestCreateAndUpdate(true, false);
     }
 
     @Test
-    public void testMutableLocalIndexCreationAndUpdate() throws Exception {
+    public void testMutableLocalIndexCreateAndUpdate() throws Exception {
         helpTestCreateAndUpdate(true, true);
     }
 
@@ -72,20 +75,20 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
         stmt.setString(2, "char" + String.valueOf(i));
         stmt.setInt(3, i);
         stmt.setLong(4, i);
-        stmt.setBigDecimal(5, new BigDecimal(Double.valueOf(i)));
+        stmt.setBigDecimal(5, new BigDecimal(i*0.5d));
         Date date = new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i - 1) * NUM_MILLIS_IN_DAY);
         stmt.setDate(6, date);
         stmt.setString(7, "a.varchar" + String.valueOf(i));
         stmt.setString(8, "a.char" + String.valueOf(i));
         stmt.setInt(9, i);
         stmt.setLong(10, i);
-        stmt.setBigDecimal(11, new BigDecimal((double)i));
+        stmt.setBigDecimal(11, new BigDecimal(i*0.5d));
         stmt.setDate(12, date);
         stmt.setString(13, "b.varchar" + String.valueOf(i));
         stmt.setString(14, "b.char" + String.valueOf(i));
         stmt.setInt(15, i);
         stmt.setLong(16, i);
-        stmt.setBigDecimal(17, new BigDecimal((double)i));
+        stmt.setBigDecimal(17, new BigDecimal(i*0.5d));
         stmt.setDate(18, date);
         stmt.executeUpdate();
     }
@@ -95,7 +98,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
         assertEquals("VARCHAR" + String.valueOf(i) + "_" + StringUtils.rightPad("CHAR" + String.valueOf(i), 6, ' ')
                 + "_A.VARCHAR" + String.valueOf(i) + "_" + StringUtils.rightPad("B.CHAR" + String.valueOf(i), 10, ' '),
                 rs.getString(1));
-        assertEquals(i * 4, rs.getInt(2));
+        assertEquals(i * 3, rs.getInt(2));
         Date date = new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i) * NUM_MILLIS_IN_DAY);
         assertEquals(date, rs.getDate(3));
         assertEquals(date, rs.getDate(4));
@@ -104,7 +107,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
         assertEquals("char" + String.valueOf(i), rs.getString(7));
         assertEquals(i, rs.getInt(8));
         assertEquals(i, rs.getLong(9));
-        assertEquals(i, rs.getDouble(10), 0.000001);
+        assertEquals(i*0.5d, rs.getDouble(10), 0.000001);
         assertEquals(i, rs.getLong(11));
         assertEquals(i, rs.getLong(12));
     }
@@ -123,7 +126,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
                     + (localIndex ? "LOCAL" : "")
                     + " INDEX IDX ON "
                     + fullDataTableName
-                    + " ((UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2)),"
+                    + " ((UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(b.char_col2)),"
                     + " (decimal_pk+int_pk+decimal_col2+int_col1)," + " date_pk+1, date1+1, date2+1 )"
                     + " INCLUDE (long_col1, long_col2)";
             PreparedStatement stmt = conn.prepareStatement(ddl);
@@ -132,14 +135,14 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             // run select query with expression in WHERE clause
             String whereSql = "SELECT long_col1, long_col2 from "
                     + fullDataTableName
-                    + " WHERE UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2) = ?"
+                    + " WHERE UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(b.char_col2) = ?"
                     + " AND decimal_pk+int_pk+decimal_col2+int_col1=?"
                     // since a.date1 and b.date2 are NULLABLE and date is fixed width, these expressions are stored as
                     // DECIMAL in the index (which is not fixed width)
                     + " AND date_pk+1=? AND date1+1=? AND date2+1=?";
             stmt = conn.prepareStatement(whereSql);
             stmt.setString(1, "VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ");
-            stmt.setInt(2, 4);
+            stmt.setInt(2, 3);
             Date date = DateUtil.parseDate("2015-01-02 00:00:00");
             stmt.setDate(3, date);
             stmt.setDate(4, date);
@@ -150,8 +153,8 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             assertEquals(
                     localIndex ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_INDEX_TEST."
                             + dataTableName
-                            + " [-32768,'VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ',4,'2015-01-02 00:00:00.000',1,420,156,800,000,1,420,156,800,000]\nCLIENT MERGE SORT"
-                            : "CLIENT PARALLEL 1-WAY RANGE SCAN OVER INDEX_TEST.IDX ['VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ',4,'2015-01-02 00:00:00.000',1,420,156,800,000,1,420,156,800,000]",
+                            + " [-32768,'VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ',3,'2015-01-02 00:00:00.000',1,420,156,800,000,1,420,156,800,000]\nCLIENT MERGE SORT"
+                            : "CLIENT PARALLEL 1-WAY RANGE SCAN OVER INDEX_TEST.IDX ['VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ',3,'2015-01-02 00:00:00.000',1,420,156,800,000,1,420,156,800,000]",
                     QueryUtil.getExplainPlan(rs));
 
             // verify that the correct results are returned
@@ -162,7 +165,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             assertFalse(rs.next());
 
             // verify all rows in data table are present in index table
-            String indexSelectSql = "SELECT UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2), "
+            String indexSelectSql = "SELECT UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(b.char_col2), "
                     + "decimal_pk+int_pk+decimal_col2+int_col1, "
                     + "date_pk+1, date1+1, date2+1, "
                     + "varchar_pk, char_pk, int_pk, long_pk, decimal_pk, "
@@ -193,52 +196,79 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             verifyResult(rs, 3);
             verifyResult(rs, 4);
 
-            // update the first row
-            upsert = "UPSERT INTO "
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+    
+    @Test
+    public void testMutableIndexUpdate() throws Exception {
+        helpTestUpdate(false);
+    }
+
+    @Test
+    public void testMutableLocalIndexUpdate() throws Exception {
+        helpTestUpdate(true);
+    }
+    
+    protected void helpTestUpdate(boolean localIndex) throws Exception {
+        String dataTableName = MUTABLE_INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            populateDataTable(conn, dataTableName);
+
+            // create an expression index
+            String ddl = "CREATE "
+                    + (localIndex ? "LOCAL" : "")
+                    + " INDEX IDX ON "
+                    + fullDataTableName
+                    + " ((UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2)),"
+                    + " (decimal_pk+int_pk+decimal_col2+int_col1)," + " date_pk+1, date1+1, date2+1 )"
+                    + " INCLUDE (long_col1, long_col2)";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+
+            // update index pk column and covered column
+            String upsert = "UPSERT INTO "
                     + fullDataTableName
-                    + "(varchar_pk, char_pk, int_pk, long_pk, decimal_pk, date_pk, a.varchar_col1) VALUES(?, ?, ?, ?, ?, ?, ?)";
+                    + "(varchar_pk, char_pk, int_pk, long_pk, decimal_pk, date_pk, varchar_col1, long_col1) VALUES(?, ?, ?, ?, ?, ?, ?, ?)";
 
             stmt = conn.prepareStatement(upsert);
             stmt.setString(1, "varchar1");
             stmt.setString(2, "char1");
             stmt.setInt(3, 1);
             stmt.setLong(4, 1l);
-            stmt.setBigDecimal(5, new BigDecimal(1.0));
+            stmt.setBigDecimal(5, new BigDecimal(0.5));
             stmt.setDate(6, DateUtil.parseDate("2015-01-01 00:00:00"));
             stmt.setString(7, "a.varchar_updated");
+            stmt.setLong(8, 101);
             stmt.executeUpdate();
             conn.commit();
 
             // verify only one row was updated in the data table
-            String selectSql = "UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2) from "
+            String selectSql = "UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2), long_col1 from "
                     + fullDataTableName;
-            rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ " + selectSql);
+            ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ " + selectSql);
             assertTrue(rs.next());
             assertEquals("VARCHAR1_CHAR1 _A.VARCHAR_UPDATED_B.CHAR1   ", rs.getString(1));
+            assertEquals(101, rs.getLong(2));
             assertTrue(rs.next());
             assertEquals("VARCHAR2_CHAR2 _A.VARCHAR2_B.CHAR2   ", rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("VARCHAR3_CHAR3 _A.VARCHAR3_B.CHAR3   ", rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("VARCHAR4_CHAR4 _A.VARCHAR4_B.CHAR4   ", rs.getString(1));
+            assertEquals(2, rs.getLong(2));
             assertFalse(rs.next());
 
             // verify that the rows in the index table are also updated
             rs = conn.createStatement().executeQuery("SELECT " + selectSql);
             assertTrue(rs.next());
-            // if the data table is immutable, the index table will have one more
-            // row
-            if (!mutable) {
-                assertEquals("VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ", rs.getString(1));
-                assertTrue(rs.next());
-            }
-            assertEquals("VARCHAR1_CHAR1 _A.VARCHAR_UPDATED_" + (mutable ? "B.CHAR1   " : ""), rs.getString(1));
+            assertEquals("VARCHAR1_CHAR1 _A.VARCHAR_UPDATED_B.CHAR1   ", rs.getString(1));
+            assertEquals(101, rs.getLong(2));
             assertTrue(rs.next());
             assertEquals("VARCHAR2_CHAR2 _A.VARCHAR2_B.CHAR2   ", rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("VARCHAR3_CHAR3 _A.VARCHAR3_B.CHAR3   ", rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("VARCHAR4_CHAR4 _A.VARCHAR4_B.CHAR4   ", rs.getString(1));
+            assertEquals(2, rs.getLong(2));
             assertFalse(rs.next());
             conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
         } finally {
@@ -546,8 +576,6 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
                     + " (int_col1+1)";
 
-            conn = DriverManager.getConnection(getUrl(), props);
-            conn.setAutoCommit(false);
             PreparedStatement stmt = conn.prepareStatement(ddl);
             stmt.execute();
             String sql = "SELECT int_col1+1 FROM " + fullDataTableName + " where int_col1+1 IN (2)";
@@ -567,25 +595,25 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
 
     @Test
     public void testOrderByWithImmutableIndex() throws Exception {
-        helpTestOrderByWithIndex(false, false);
+        helpTestSelectAliasAndOrderByWithIndex(false, false);
     }
 
     @Test
     public void testOrderByWithImmutableLocalIndex() throws Exception {
-        helpTestOrderByWithIndex(false, true);
+        helpTestSelectAliasAndOrderByWithIndex(false, true);
     }
 
     @Test
     public void testOrderByWithMutableIndex() throws Exception {
-        helpTestOrderByWithIndex(true, false);
+        helpTestSelectAliasAndOrderByWithIndex(true, false);
     }
 
     @Test
     public void testOrderByWithMutableLocalIndex() throws Exception {
-        helpTestOrderByWithIndex(true, false);
+        helpTestSelectAliasAndOrderByWithIndex(true, false);
     }
 
-    protected void helpTestOrderByWithIndex(boolean mutable, boolean localIndex) throws Exception {
+    protected void helpTestSelectAliasAndOrderByWithIndex(boolean mutable, boolean localIndex) throws Exception {
         String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
         String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
@@ -596,11 +624,9 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
                     + " (int_col1+1)";
 
-            conn = DriverManager.getConnection(getUrl(), props);
-            conn.setAutoCommit(false);
             PreparedStatement stmt = conn.prepareStatement(ddl);
             stmt.execute();
-            String sql = "SELECT int_col1+1 FROM " + fullDataTableName + " ORDER BY int_col1+1";
+            String sql = "SELECT int_col1+1 AS foo FROM " + fullDataTableName + " ORDER BY foo";
             ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql);
             assertEquals("CLIENT PARALLEL 1-WAY "
                     + (localIndex ? "RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName
@@ -620,66 +646,13 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
     }
 
     @Test
-    public void testSelectColOnlyInDataTableImmutableIndex() throws Exception {
-        helpTestSelectColOnlyInDataTable(false, false);
-    }
-
-    @Test
-    public void testSelectColOnlyInDataTableImmutableLocalIndex() throws Exception {
-        helpTestSelectColOnlyInDataTable(false, true);
-    }
-
-    @Test
-    public void testSelectColOnlyInDataTableMutableIndex() throws Exception {
-        helpTestSelectColOnlyInDataTable(true, false);
-    }
-
-    @Test
-    public void testSelectColOnlyInDataTableMutableLocalIndex() throws Exception {
-        helpTestSelectColOnlyInDataTable(true, false);
-    }
-
-    protected void helpTestSelectColOnlyInDataTable(boolean mutable, boolean localIndex) throws Exception {
-        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
-        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        try {
-            conn.setAutoCommit(false);
-            populateDataTable(conn, dataTableName);
-            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
-                    + " (int_col1+1)";
-
-            conn = DriverManager.getConnection(getUrl(), props);
-            conn.setAutoCommit(false);
-            PreparedStatement stmt = conn.prepareStatement(ddl);
-            stmt.execute();
-            String sql = "SELECT int_col1+1, int_col2 FROM " + fullDataTableName + " WHERE int_col1+1=2";
-            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql);
-            assertEquals("CLIENT PARALLEL 1-WAY "
-                    + (localIndex ? "RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName
-                            + " [-32768,2]\n    SERVER FILTER BY FIRST KEY ONLY\nCLIENT MERGE SORT" : "FULL SCAN OVER "
-                            + fullDataTableName + "\n    SERVER FILTER BY (A.INT_COL1 + 1) = 2"),
-                    QueryUtil.getExplainPlan(rs));
-            rs = conn.createStatement().executeQuery(sql);
-            assertTrue(rs.next());
-            assertEquals(2, rs.getInt(1));
-            assertEquals(1, rs.getInt(2));
-            assertFalse(rs.next());
-            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
-        } finally {
-            conn.close();
-        }
-    }
-    
-    @Test
     public void testImmutableIndexWithCaseSensitiveCols() throws Exception {
         helpTestIndexWithCaseSensitiveCols(false, false);
     }
     
     @Test
     public void testImmutableLocalIndexWithCaseSensitiveCols() throws Exception {
-        helpTestIndexWithCaseSensitiveCols(true, false);
+        helpTestIndexWithCaseSensitiveCols(false, true);
     }
     
     @Test
@@ -689,27 +662,25 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
     
     @Test
     public void testMutableLocalIndexWithCaseSensitiveCols() throws Exception {
-        helpTestIndexWithCaseSensitiveCols(true, false);
+        helpTestIndexWithCaseSensitiveCols(true, true);
     }
     
     protected void helpTestIndexWithCaseSensitiveCols(boolean mutable, boolean localIndex) throws Exception {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         try {
-            conn.createStatement().execute("CREATE TABLE cs (k VARCHAR NOT NULL PRIMARY KEY, \"V1\" VARCHAR, \"v2\" VARCHAR) "+ (mutable ? "IMMUTABLE_ROWS=true" : ""));
+            conn.createStatement().execute("CREATE TABLE cs (k VARCHAR NOT NULL PRIMARY KEY, \"cf1\".\"V1\" VARCHAR, \"CF2\".\"v2\" VARCHAR) "+ (mutable ? "IMMUTABLE_ROWS=true" : ""));
             String query = "SELECT * FROM cs";
             ResultSet rs = conn.createStatement().executeQuery(query);
             assertFalse(rs.next());
-            if (localIndex) {
-                conn.createStatement().execute("CREATE LOCAL INDEX ics ON cs (\"v2\" || '_modified') INCLUDE (\"V1\",\"v2\")");
-            } else {
-                conn.createStatement().execute("CREATE INDEX ics ON cs (\"V1\" || '_' || \"v2\") INCLUDE (\"V1\",\"v2\")");
-            }
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX ics ON cs (\"cf1\".\"V1\" || '_' || \"CF2\".\"v2\") INCLUDE (\"V1\",\"v2\")";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
             query = "SELECT * FROM ics";
             rs = conn.createStatement().executeQuery(query);
             assertFalse(rs.next());
 
-            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO cs VALUES(?,?,?)");
+            stmt = conn.prepareStatement("UPSERT INTO cs VALUES(?,?,?)");
             stmt.setString(1,"a");
             stmt.setString(2, "x");
             stmt.setString(3, "1");
@@ -720,7 +691,6 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             stmt.execute();
             conn.commit();
 
-            //TODO FIX THIS change this to *
             query = "SELECT (\"V1\" || '_' || \"v2\"), k, \"V1\", \"v2\"  FROM cs WHERE (\"V1\" || '_' || \"v2\") = 'x_1'";
             rs = conn.createStatement().executeQuery("EXPLAIN " + query);
             if(localIndex){
@@ -737,7 +707,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             assertEquals("x",rs.getString(3));
             assertEquals("1",rs.getString(4));
             //TODO figure out why this " " is needed
-            assertEquals("x_1",rs.getString("\"('V1' || '_' || 'v2')\""));
+            assertEquals("x_1",rs.getString("\"('cf1'.'V1' || '_' || 'CF2'.'v2')\""));
             assertEquals("a",rs.getString("k"));
             assertEquals("x",rs.getString("V1"));
             assertEquals("1",rs.getString("v2"));
@@ -763,7 +733,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             assertEquals("x_1",rs.getString(4));
             assertEquals("x_1",rs.getString("Foo1"));
             assertEquals("x_1",rs.getString(5));
-            assertEquals("x_1",rs.getString("\"('V1' || '_' || 'v2')\""));
+            assertEquals("x_1",rs.getString("\"('cf1'.'V1' || '_' || 'CF2'.'v2')\""));
             assertTrue(rs.next());
             assertEquals("y",rs.getString(1));
             assertEquals("y",rs.getString("V1"));
@@ -774,12 +744,65 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             assertEquals("y_2",rs.getString(4));
             assertEquals("y_2",rs.getString("Foo1"));
             assertEquals("y_2",rs.getString(5));
-            assertEquals("y_2",rs.getString("\"('V1' || '_' || 'v2')\""));
+            assertEquals("y_2",rs.getString("\"('cf1'.'V1' || '_' || 'CF2'.'v2')\""));
             assertFalse(rs.next());
             conn.createStatement().execute("DROP INDEX ICS ON CS");
         } finally {
             conn.close();
         }
+    }    
+    
+    @Test
+    public void testSelectColOnlyInDataTableImmutableIndex() throws Exception {
+        helpTestSelectColOnlyInDataTable(false, false);
+    }
+
+    @Test
+    public void testSelectColOnlyInDataTableImmutableLocalIndex() throws Exception {
+        helpTestSelectColOnlyInDataTable(false, true);
+    }
+
+    @Test
+    public void testSelectColOnlyInDataTableMutableIndex() throws Exception {
+        helpTestSelectColOnlyInDataTable(true, false);
+    }
+
+    @Test
+    public void testSelectColOnlyInDataTableMutableLocalIndex() throws Exception {
+        helpTestSelectColOnlyInDataTable(true, true);
+    }
+
+    protected void helpTestSelectColOnlyInDataTable(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            populateDataTable(conn, dataTableName);
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
+                    + " (int_col1+1)";
+
+            conn = DriverManager.getConnection(getUrl(), props);
+            conn.setAutoCommit(false);
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+            String sql = "SELECT int_col1+1, int_col2 FROM " + fullDataTableName + " WHERE int_col1+1=2";
+            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql);
+            assertEquals("CLIENT PARALLEL 1-WAY "
+                    + (localIndex ? "RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName
+                            + " [-32768,2]\n    SERVER FILTER BY FIRST KEY ONLY\nCLIENT MERGE SORT" : "FULL SCAN OVER "
+                            + fullDataTableName + "\n    SERVER FILTER BY (A.INT_COL1 + 1) = 2"),
+                    QueryUtil.getExplainPlan(rs));
+            rs = conn.createStatement().executeQuery(sql);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            assertEquals(1, rs.getInt(2));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
     }
     
     @Test
@@ -809,58 +832,439 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
 
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-
-        // make sure that the tables are empty, but reachable
-        conn.createStatement().execute(
-          "CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
-        query = "SELECT * FROM t" ;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-        String indexName = "it_" + (mutable ? "m" : "im") + "_" + (local ? "l" : "h");
-        conn.createStatement().execute("CREATE " + ( local ? "LOCAL" : "") + " INDEX " + indexName + " ON t (v1 || '_' || v2)");
+        try {
+	        conn.setAutoCommit(false);
+	
+	        // make sure that the tables are empty, but reachable
+	        conn.createStatement().execute(
+	          "CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+	        query = "SELECT * FROM t" ;
+	        rs = conn.createStatement().executeQuery(query);
+	        assertFalse(rs.next());
+	        String indexName = "it_" + (mutable ? "m" : "im") + "_" + (local ? "l" : "h");
+	        conn.createStatement().execute("CREATE " + ( local ? "LOCAL" : "") + " INDEX " + indexName + " ON t (v1 || '_' || v2)");
+	
+	        query = "SELECT * FROM t";
+	        rs = conn.createStatement().executeQuery(query);
+	        assertFalse(rs.next());
+	
+	        // load some data into the table
+	        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?,?)");
+	        stmt.setString(1, "a");
+	        stmt.setString(2, "x");
+	        stmt.setString(3, "1");
+	        stmt.execute();
+	        conn.commit();
+	
+	        assertIndexExists(conn,true);
+	        conn.createStatement().execute("ALTER TABLE t DROP COLUMN v1");
+	        assertIndexExists(conn,false);
+	
+	        query = "SELECT * FROM t";
+	        rs = conn.createStatement().executeQuery(query);
+	        assertTrue(rs.next());
+	        assertEquals("a",rs.getString(1));
+	        assertEquals("1",rs.getString(2));
+	        assertFalse(rs.next());
+	
+	        // load some data into the table
+	        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?)");
+	        stmt.setString(1, "a");
+	        stmt.setString(2, "2");
+	        stmt.execute();
+	        conn.commit();
+	
+	        query = "SELECT * FROM t";
+	        rs = conn.createStatement().executeQuery(query);
+	        assertTrue(rs.next());
+	        assertEquals("a",rs.getString(1));
+	        assertEquals("2",rs.getString(2));
+	        assertFalse(rs.next());
+        }
+        finally {
+        	conn.close();
+        }
+    }
+    
+    private static void assertIndexExists(Connection conn, boolean exists) throws SQLException {
+        ResultSet rs = conn.getMetaData().getIndexInfo(null, null, "T", false, false);
+        assertEquals(exists, rs.next());
+    }
+    
+    @Test
+    public void testImmutableIndexDropCoveredColumn() throws Exception {
+    	helpTestDropCoveredColumn(false, false);
+    }
+    
+    @Test
+    public void testImmutableLocalIndexDropCoveredColumn() throws Exception {
+    	helpTestDropCoveredColumn(false, true);
+    }
+    
+    @Test
+    public void testMutableIndexDropCoveredColumn() throws Exception {
+    	helpTestDropCoveredColumn(true, false);
+    }
+    
+    @Test
+    public void testMutableLocalIndexDropCoveredColumn() throws Exception {
+    	helpTestDropCoveredColumn(true, true);
+    }
+    
+    public void helpTestDropCoveredColumn(boolean mutable, boolean local) throws Exception {
+        ResultSet rs;
+        PreparedStatement stmt;
 
-        query = "SELECT * FROM t";
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+	        conn.setAutoCommit(false);
+	
+	        // make sure that the tables are empty, but reachable
+	        conn.createStatement().execute(
+	          "CREATE TABLE t"
+	              + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR)");
+	        String dataTableQuery = "SELECT * FROM t";
+	        rs = conn.createStatement().executeQuery(dataTableQuery);
+	        assertFalse(rs.next());
+	
+	        String indexName = "it_" + (mutable ? "m" : "im") + "_" + (local ? "l" : "h");
+	        conn.createStatement().execute("CREATE " + ( local ? "LOCAL" : "") + " INDEX " + indexName + " ON t (k || '_' || v1) include (v2, v3)");
+	        String indexTableQuery = "SELECT * FROM " + indexName;
+	        rs = conn.createStatement().executeQuery(indexTableQuery);
+	        assertFalse(rs.next());
+	
+	        // load some data into the table
+	        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?,?,?)");
+	        stmt.setString(1, "a");
+	        stmt.setString(2, "x");
+	        stmt.setString(3, "1");
+	        stmt.setString(4, "j");
+	        stmt.execute();
+	        conn.commit();
+	
+	        assertIndexExists(conn,true);
+	        conn.createStatement().execute("ALTER TABLE t DROP COLUMN v2");
+	        assertIndexExists(conn,true);
+	
+	        // verify data table rows
+	        rs = conn.createStatement().executeQuery(dataTableQuery);
+	        assertTrue(rs.next());
+	        assertEquals("a",rs.getString(1));
+	        assertEquals("x",rs.getString(2));
+	        assertEquals("j",rs.getString(3));
+	        assertFalse(rs.next());
+	        
+	        // verify index table rows
+	        rs = conn.createStatement().executeQuery(indexTableQuery);
+	        assertTrue(rs.next());
+	        assertEquals("a_x",rs.getString(1));
+	        assertEquals("a",rs.getString(2));
+	        assertEquals("j",rs.getString(3));
+	        assertFalse(rs.next());
+	
+	        // add another row
+	        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?,?)");
+	        stmt.setString(1, "b");
+	        stmt.setString(2, "y");
+	        stmt.setString(3, "k");
+	        stmt.execute();
+	        conn.commit();
+	
+	        // verify data table rows
+	        rs = conn.createStatement().executeQuery(dataTableQuery);
+	        assertTrue(rs.next());
+	        assertEquals("a",rs.getString(1));
+	        assertEquals("x",rs.getString(2));
+	        assertEquals("j",rs.getString(3));
+	        assertTrue(rs.next());
+	        assertEquals("b",rs.getString(1));
+	        assertEquals("y",rs.getString(2));
+	        assertEquals("k",rs.getString(3));
+	        assertFalse(rs.next());
+	        
+	        // verify index table rows
+	        rs = conn.createStatement().executeQuery(indexTableQuery);
+	        assertTrue(rs.next());
+	        assertEquals("a_x",rs.getString(1));
+	        assertEquals("a",rs.getString(2));
+	        assertEquals("j",rs.getString(3));
+	        assertTrue(rs.next());
+	        assertEquals("b_y",rs.getString(1));
+	        assertEquals("b",rs.getString(2));
+	        assertEquals("k",rs.getString(3));
+	        assertFalse(rs.next());
+        }
+        finally {
+        	conn.close();
+        }
+    }
+    
+    @Test
+    public void testImmutableIndexAddPKColumnToTable() throws Exception {
+    	helpTestAddPKColumnToTable(false, false);
+    }
+    
+    @Test
+    public void testImmutableLocalIndexAddPKColumnToTable() throws Exception {
+    	helpTestAddPKColumnToTable(false, true);
+    }
+    
+    @Test
+    public void testMutableIndexAddPKColumnToTable() throws Exception {
+    	helpTestAddPKColumnToTable(true, false);
+    }
+    
+    @Test
+    public void testMutableLocalIndexAddPKColumnToTable() throws Exception {
+    	helpTestAddPKColumnToTable(true, true);
+    }
+    
+    public void helpTestAddPKColumnToTable(boolean mutable, boolean local) throws Exception {
+        ResultSet rs;
+        PreparedStatement stmt;
 
-        // load some data into the table
-        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?,?)");
-        stmt.setString(1, "a");
-        stmt.setString(2, "x");
-        stmt.setString(3, "1");
-        stmt.execute();
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+	        conn.setAutoCommit(false);
+	
+	        // make sure that the tables are empty, but reachable
+	        conn.createStatement().execute(
+	          "CREATE TABLE t"
+	              + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+	        String dataTableQuery = "SELECT * FROM t";
+	        rs = conn.createStatement().executeQuery(dataTableQuery);
+	        assertFalse(rs.next());
+	
+	        String indexName = "IT_" + (mutable ? "M" : "IM") + "_" + (local ? "L" : "H");
+	        conn.createStatement().execute("CREATE " + ( local ? "LOCAL" : "") + " INDEX " + indexName + " ON t (v1 || '_' || v2)");
+	        String indexTableQuery = "SELECT * FROM " + indexName;
+	        rs = conn.createStatement().executeQuery(indexTableQuery);
+	        assertFalse(rs.next());
+	
+	        // load some data into the table
+	        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?,?)");
+	        stmt.setString(1, "a");
+	        stmt.setString(2, "x");
+	        stmt.setString(3, "1");
+	        stmt.execute();
+	        conn.commit();
+	
+	        assertIndexExists(conn,true);
+	        conn.createStatement().execute("ALTER TABLE t ADD v3 VARCHAR, k2 DECIMAL PRIMARY KEY");
+	        rs = conn.getMetaData().getPrimaryKeys("", "", "T");
+	        assertTrue(rs.next());
+	        assertEquals("K",rs.getString("COLUMN_NAME"));
+	        assertEquals(1, rs.getShort("KEY_SEQ"));
+	        assertTrue(rs.next());
+	        assertEquals("K2",rs.getString("COLUMN_NAME"));
+	        assertEquals(2, rs.getShort("KEY_SEQ"));
+	
+	        rs = conn.getMetaData().getPrimaryKeys("", "", indexName);
+	        assertTrue(rs.next());
+	        assertEquals(IndexUtil.INDEX_COLUMN_NAME_SEP + "(V1 || '_' || V2)",rs.getString("COLUMN_NAME"));
+	        int offset = local ? 1 : 0;
+	        assertEquals(offset+1, rs.getShort("KEY_SEQ"));
+	        assertTrue(rs.next());
+	        assertEquals(IndexUtil.INDEX_COLUMN_NAME_SEP + "K",rs.getString("COLUMN_NAME"));
+	        assertEquals(offset+2, rs.getShort("KEY_SEQ"));
+	        assertTrue(rs.next());
+	        assertEquals(IndexUtil.INDEX_COLUMN_NAME_SEP + "K2",rs.getString("COLUMN_NAME"));
+	        assertEquals(offset+3, rs.getShort("KEY_SEQ"));
+	
+	        // verify data table rows
+	        rs = conn.createStatement().executeQuery(dataTableQuery);
+	        assertTrue(rs.next());
+	        assertEquals("a",rs.getString(1));
+	        assertEquals("x",rs.getString(2));
+	        assertEquals("1",rs.getString(3));
+	        assertNull(rs.getBigDecimal(4));
+	        assertFalse(rs.next());
+	        
+	        // verify index table rows
+	        rs = conn.createStatement().executeQuery(indexTableQuery);
+	        assertTrue(rs.next());
+	        assertEquals("x_1",rs.getString(1));
+	        assertEquals("a",rs.getString(2));
+	        assertNull(rs.getBigDecimal(3));
+	        assertFalse(rs.next());
+	
+	        // load some data into the table
+	        stmt = conn.prepareStatement("UPSERT INTO t(K,K2,V1,V2) VALUES(?,?,?,?)");
+	        stmt.setString(1, "b");
+	        stmt.setBigDecimal(2, BigDecimal.valueOf(2));
+	        stmt.setString(3, "y");
+	        stmt.setString(4, "2");
+	        stmt.execute();
+	        conn.commit();
+	
+	        // verify data table rows
+	        rs = conn.createStatement().executeQuery(dataTableQuery);
+	        assertTrue(rs.next());
+	        assertEquals("a",rs.getString(1));
+	        assertEquals("x",rs.getString(2));
+	        assertEquals("1",rs.getString(3));
+	        assertNull(rs.getString(4));
+	        assertNull(rs.getBigDecimal(5));
+	        assertTrue(rs.next());
+	        assertEquals("b",rs.getString(1));
+	        assertEquals("y",rs.getString(2));
+	        assertEquals("2",rs.getString(3));
+	        assertNull(rs.getString(4));
+	        assertEquals(BigDecimal.valueOf(2),rs.getBigDecimal(5));
+	        assertFalse(rs.next());
+	        
+	        // verify index table rows
+	        rs = conn.createStatement().executeQuery(indexTableQuery);
+	        assertTrue(rs.next());
+	        assertEquals("x_1",rs.getString(1));
+	        assertEquals("a",rs.getString(2));
+	        assertNull(rs.getBigDecimal(3));
+	        assertTrue(rs.next());
+	        assertEquals("y_2",rs.getString(1));
+	        assertEquals("b",rs.getString(2));
+	        assertEquals(BigDecimal.valueOf(2),rs.getBigDecimal(3));
+	        assertFalse(rs.next());
+        }
+        finally {
+        	conn.close();
+        }
+    }
+    
+    @Test
+    public void testUpdatableViewWithIndex() throws Exception {
+        helpTestUpdatableViewIndex(false);
+    }
+    
+    @Test
+    public void testUpdatableViewWithLocalIndex() throws Exception {
+        helpTestUpdatableViewIndex(true);
+    }
+       
+    private void helpTestUpdatableViewIndex(boolean local) throws Exception {
+    	Connection conn = DriverManager.getConnection(getUrl());
+    	try {
+	        String ddl = "CREATE TABLE t (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, k3 DECIMAL, s1 VARCHAR, s2 VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2, k3))";
+	        conn.createStatement().execute(ddl);
+	        ddl = "CREATE VIEW v AS SELECT * FROM t WHERE k1 = 1";
+	        conn.createStatement().execute(ddl);
+	        conn.createStatement().execute("UPSERT INTO v(k2,s1,s2,k3) VALUES(120,'foo0','bar0',50.0)");
+	        conn.createStatement().execute("UPSERT INTO v(k2,s1,s2,k3) VALUES(121,'foo1','bar1',51.0)");
+	        conn.commit();
+	        
+	        ResultSet rs;
+	        conn.createStatement().execute("CREATE " + (local ? "LOCAL" : "") + " INDEX i1 on v(k1+k2+k3) include (s1, s2)");
+	        conn.createStatement().execute("UPSERT INTO v(k2,s1,s2,k3) VALUES(120,'foo2','bar2',50.0)");
+	        conn.commit();
+	
+	        String query = "SELECT k1, k2, k3, s1, s2 FROM v WHERE 	k1+k2+k3 = 173.0";
+	        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+	        String queryPlan = QueryUtil.getExplainPlan(rs);
+	        if (local) {
+	            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_T [-32768,173]\n" + "CLIENT MERGE SORT",
+	                    queryPlan);
+	        } else {
+	            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [" + Short.MIN_VALUE + ",173]", queryPlan);
+	        }
+	        rs = conn.createStatement().executeQuery(query);
+	        assertTrue(rs.next());
+	        assertEquals(1, rs.getInt(1));
+	        assertEquals(121, rs.getInt(2));
+	        assertTrue(BigDecimal.valueOf(51.0).compareTo(rs.getBigDecimal(3))==0);
+	        assertEquals("foo1", rs.getString(4));
+	        assertEquals("bar1", rs.getString(5));
+	        assertFalse(rs.next());
+	
+	        conn.createStatement().execute("CREATE " + (local ? "LOCAL" : "") + " INDEX i2 on v(s1||'_'||s2)");
+	        
+	        query = "SELECT k1, k2, s1||'_'||s2 FROM v WHERE (s1||'_'||s2)='foo2_bar2'";
+	        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+	        if (local) {
+	            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_T [" + (Short.MIN_VALUE + 1)
+	                    + ",'foo2_bar2']\n" + "    SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT",
+	                    QueryUtil.getExplainPlan(rs));
+	        } else {
+	            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [" + (Short.MIN_VALUE + 1) + ",'foo2_bar2']\n"
+	                    + "    SERVER FILTER BY FIRST KEY ONLY", QueryUtil.getExplainPlan(rs));
+	        }
+	        rs = conn.createStatement().executeQuery(query);
+	        assertTrue(rs.next());
+	        assertEquals(1, rs.getInt(1));
+	        assertEquals(120, rs.getInt(2));
+	        assertEquals("foo2_bar2", rs.getString(3));
+	        assertFalse(rs.next());
+    	}
+        finally {
+        	conn.close();
+        }
+    }
+    
+    @Test
+    public void testViewUsesTableIndex() throws Exception {
+        ResultSet rs;
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE t (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, s1 VARCHAR, s2 VARCHAR, s3 VARCHAR, s4 VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2))";
+        conn.createStatement().execute(ddl);
+        conn.createStatement().execute("CREATE INDEX i1 ON t(k2, s2, s3, s1)");
+        conn.createStatement().execute("CREATE INDEX i2 ON t(k2, s2||'_'||s3, s1, s4)");
+        
+        ddl = "CREATE VIEW v AS SELECT * FROM t WHERE s1 = 'foo'";
+        conn.createStatement().execute(ddl);
+        conn.createStatement().execute("UPSERT INTO t VALUES(1,1,'foo','abc','cab')");
+        conn.createStatement().execute("UPSERT INTO t VALUES(2,2,'bar','xyz','zyx')");
         conn.commit();
-
-        assertIndexExists(conn,true);
-        conn.createStatement().execute("ALTER TABLE t DROP COLUMN v1");
-        assertIndexExists(conn,false);
-
-        query = "SELECT * FROM t";
-        rs = conn.createStatement().executeQuery(query);
+        
+        rs = conn.createStatement().executeQuery("SELECT count(*) FROM v");
         assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("1",rs.getString(2));
+        assertEquals(1, rs.getLong(1));
         assertFalse(rs.next());
-
-        // load some data into the table
-        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?)");
-        stmt.setString(1, "a");
-        stmt.setString(2, "2");
-        stmt.execute();
-        conn.commit();
-
-        query = "SELECT * FROM t";
+        
+        conn.createStatement().execute("CREATE INDEX vi1 on v(k2)");
+
+        // i2 should be used since its index key contains s2||'_'||s3
+        String query = "SELECT s2||'_'||s3 FROM v WHERE k2=1 AND (s2||'_'||s3)='abc_cab'";
+        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+        String queryPlan = QueryUtil.getExplainPlan(rs);
+        assertEquals(
+                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER I2 [1,'abc_cab','foo']\n" + 
+                "    SERVER FILTER BY FIRST KEY ONLY", queryPlan);
         rs = conn.createStatement().executeQuery(query);
         assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("2",rs.getString(2));
+        assertEquals("abc_cab", rs.getString(1));
         assertFalse(rs.next());
+        
+        conn.createStatement().execute("ALTER VIEW v DROP COLUMN s4");
+        conn.createStatement().execute("CREATE INDEX vi2 on v(k2)");
+        //i2 cannot be used since s4 has been dropped from the view, so i1 will be used 
+        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+        queryPlan = QueryUtil.getExplainPlan(rs);
+        assertEquals(
+                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER I1 [1]\n" + 
+                "    SERVER FILTER BY FIRST KEY ONLY AND ((\"S2\" || '_' || \"S3\") = 'abc_cab' AND \"S1\" = 'foo')", queryPlan);
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals("abc_cab", rs.getString(1));
+        assertFalse(rs.next());    
     }
     
-    private static void assertIndexExists(Connection conn, boolean exists) throws SQLException {
-        ResultSet rs = conn.getMetaData().getIndexInfo(null, null, "T", false, false);
-        assertEquals(exists, rs.next());
-    }
+	@Test
+	public void testExpressionThrowsException() throws Exception {
+		Connection conn = DriverManager.getConnection(getUrl());
+		String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY, k2 INTEGER)";
+		try {
+			conn.createStatement().execute(ddl);
+			ddl = "CREATE INDEX i on t(k1/k2)";
+			conn.createStatement().execute(ddl);
+			// upsert should succeed
+			conn.createStatement().execute("UPSERT INTO T VALUES(1,1)");
+			conn.commit();
+			// divide by zero should fail
+			conn.createStatement().execute("UPSERT INTO T VALUES(1,0)");
+			conn.commit();
+			fail();
+		} catch (CommitException e) {
+		}
+	}
 
 }

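The divide-by-zero pair in testExpressionThrowsException above is worth spelling out: a constant expression such as k1/0 is rejected when the index is created (see the DIVIDE_BY_ZERO test further down), while a data-dependent expression such as k1/k2 only fails when a row that triggers the error is written, surfacing as a CommitException during index maintenance. A minimal standalone sketch of the write-time case; the connection URL jdbc:phoenix:localhost is illustrative only:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class ExpressionIndexWriteFailure {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try {
                conn.createStatement().execute(
                    "CREATE TABLE t (k1 INTEGER PRIMARY KEY, k2 INTEGER)");
                conn.createStatement().execute("CREATE INDEX i ON t (k1/k2)");
                conn.createStatement().execute("UPSERT INTO T VALUES(1,1)");
                conn.commit(); // succeeds: index maintenance evaluates 1/1
                conn.createStatement().execute("UPSERT INTO T VALUES(1,0)");
                conn.commit(); // fails: index maintenance evaluates 1/0
            } finally {
                conn.close();
            }
        }
    }
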
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index ce81e1f..e234498 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -61,7 +61,6 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.List;
 import java.util.Set;
 
@@ -1414,12 +1413,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                                     for (PTable index : table.getIndexes()) {
                                         try {
                                             IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
-                                            // get the columns required to create the index 
+                                            // get the columns required for the index pk
                                             Set<ColumnReference> indexColumns = indexMaintainer.getIndexedColumns();
                                             byte[] indexKey =
                                                     SchemaUtil.getTableKey(tenantId, index
                                                             .getSchemaName().getBytes(), index.getTableName().getBytes());
-                                            // If index requires this column, then drop it
+                                            // If index requires this column for its pk, then drop it
                                             if (indexColumns.contains(new ColumnReference(columnToDelete.getFamilyName().getBytes(), columnToDelete.getName().getBytes()))) {
                                                 // Since we're dropping the index, lock it to ensure
                                                 // that a change in index state doesn't

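In other words, only columns referenced by the indexed expressions (the index pk) force the index to be dropped; columns that are merely covered do not, which is exactly what helpTestDropCoveredColumn above verifies. A condensed sketch of the two outcomes, again assuming an illustrative jdbc:phoenix:localhost URL:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class DropColumnIndexRule {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try {
                conn.createStatement().execute(
                    "CREATE TABLE t (k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
                conn.createStatement().execute(
                    "CREATE INDEX idx ON t (k || '_' || v1) INCLUDE (v2)");
                // v2 is only covered: idx survives, just without that column
                conn.createStatement().execute("ALTER TABLE t DROP COLUMN v2");
                // v1 feeds the indexed expression (the index pk): idx is dropped
                conn.createStatement().execute("ALTER TABLE t DROP COLUMN v1");
            } finally {
                conn.close();
            }
        }
    }
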
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index b2ca979..f4b4f98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -154,8 +154,9 @@ public enum SQLExceptionCode {
      /**
       *  Expression Index exceptions.
       */
-     AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX(520, "42897", "Aggreagaate expression are not allowed in an index"),
-     NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX(521, "42898", "Non-deterministic expression are not allowed in an index"),
+     AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX(520, "42897", "Aggregate expression not allowed in an index"),
+     NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX(521, "42898", "Non-deterministic expression not allowed in an index"),
+     STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX(522, "42899", "Stateless expression not allowed in an index"),
 
      /** 
      * HBase and Phoenix specific implementation defined sub-classes.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 7199dad..fd006c9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -692,10 +692,10 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
                 indexFields[pos] = dataRowKeySchema.getField(i);
             } 
         }
-        Iterator<Expression> expressionSetItr = indexedExpressions.iterator();
+        Iterator<Expression> expressionItr = indexedExpressions.iterator();
         for (Field indexField : indexFields) {
             if (indexField == null) { // Add field for kv column in index
-                final PDataType dataType = expressionSetItr.next().getDataType();
+                final PDataType dataType = expressionItr.next().getDataType();
                 builder.addField(new PDatum() {
 
                     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 61ee081..64e62f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -433,14 +433,34 @@ public class MetaDataClient {
         for (PTable index : indexes) {
             if (index.getViewIndexId() == null) {
                 boolean containsAllReqdCols = true;
-                // Ensure that all indexed columns from index on physical table
+                // Ensure that all columns required by the index
                 // exist in the view too (since view columns may be removed)
-                List<PColumn> pkColumns = index.getPKColumns();
-                for (int i = index.getBucketNum() == null ? 0 : 1; i < pkColumns.size(); i++) {
+                IndexMaintainer indexMaintainer = index.getIndexMaintainer(physicalTable, connection);
+                // check that the columns required for the index pk (not including the pk columns of the data table)
+                // are present in the view
+                Set<ColumnReference> indexColRefs = indexMaintainer.getIndexedColumns();
+                for (ColumnReference colRef : indexColRefs) {
+                    try {
+                        byte[] cf = colRef.getFamily();
+                        byte[] cq = colRef.getQualifier();
+                        if (cf != null) {
+                            table.getColumnFamily(cf).getColumn(cq);
+                        }
+                        else {
+                            table.getColumn(Bytes.toString(cq));
+                        }
+                    } catch (ColumnNotFoundException e) { // Ignore this index and continue with others
+                        containsAllReqdCols = false;
+                        break;
+                    }
+                }
+                // check that pk columns of the data table (which are also present in the index pk) are present in the view
+                List<PColumn> pkColumns = physicalTable.getPKColumns();
+                for (int i = physicalTable.getBucketNum() == null ? 0 : 1; i < pkColumns.size(); i++) {
                     try {
                         PColumn pkColumn = pkColumns.get(i);
-                        IndexUtil.getDataColumn(table, pkColumn.getName().getString());
-                    } catch (IllegalArgumentException e) { // Ignore this index and continue with others
+                        table.getColumn(pkColumn.getName().getString());
+                    } catch (ColumnNotFoundException e) { // Ignore this index and continue with others
                         containsAllReqdCols = false;
                         break;
                     }
@@ -993,9 +1013,8 @@ public class MetaDataClient {
                     if (expression.getDeterminism() != Determinism.ALWAYS) {
                         throw new SQLExceptionInfo.Builder(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
                     }
-                    // true for any constant (including a view constant), as we don't need these in the index
                     if (expression.isStateless()) {
-                        continue;
+                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
                     }
                     unusedPkColumns.remove(expression);
                     

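The net effect of this check is that a view only inherits a parent-table index while it still exposes every column that index needs, both in the indexed expressions and in the data table pk. A condensed replay of the testViewUsesTableIndex scenario above, assuming an illustrative jdbc:phoenix:localhost URL:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class ViewInheritsIndex {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try {
                conn.createStatement().execute("CREATE TABLE t (k1 INTEGER NOT NULL, "
                    + "k2 INTEGER NOT NULL, s1 VARCHAR, s2 VARCHAR, s3 VARCHAR, s4 VARCHAR "
                    + "CONSTRAINT pk PRIMARY KEY (k1, k2))");
                conn.createStatement().execute("CREATE INDEX i2 ON t(k2, s2||'_'||s3, s1, s4)");
                conn.createStatement().execute("CREATE VIEW v AS SELECT * FROM t WHERE s1 = 'foo'");
                // While v still has s4, queries on v may be rewritten to use i2 ...
                conn.createStatement().executeQuery(
                    "EXPLAIN SELECT s2||'_'||s3 FROM v WHERE k2=1 AND (s2||'_'||s3)='abc_cab'");
                // ... but once s4 is gone from the view, i2 no longer qualifies for v.
                conn.createStatement().execute("ALTER VIEW v DROP COLUMN s4");
            } finally {
                conn.close();
            }
        }
    }
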
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e5a6308/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 466db9f..4accd38 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -1493,5 +1493,77 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         assertTrue(scan.getFilter() instanceof FirstKeyOnlyFilter);
         assertEquals(1, scan.getFamilyMap().size());
     }
+    
+    @Test 
+    public void testNonDeterministicExpressionIndex() throws Exception {
+        String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)";
+        Connection conn = DriverManager.getConnection(getUrl());
+        Statement stmt = null;
+        try {
+            stmt = conn.createStatement();
+            stmt.execute(ddl);
+            stmt.execute("CREATE INDEX i ON t (RAND())");
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), e.getErrorCode());
+        }
+        finally {
+            stmt.close();
+        }
+    }
+    
+    @Test 
+    public void testStatelessExpressionIndex() throws Exception {
+        String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)";
+        Connection conn = DriverManager.getConnection(getUrl());
+        Statement stmt = null;
+        try {
+            stmt = conn.createStatement();
+            stmt.execute(ddl);
+            stmt.execute("CREATE INDEX i ON t (2)");
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), e.getErrorCode());
+        }
+        finally {
+            stmt.close();
+        }
+    }
+    
+    @Test 
+    public void testAggregateExpressionIndex() throws Exception {
+        String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)";
+        Connection conn = DriverManager.getConnection(getUrl());
+        Statement stmt = null;
+        try {
+            stmt = conn.createStatement();
+            stmt.execute(ddl);
+            stmt.execute("CREATE INDEX i ON t (SUM(k1))");
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), e.getErrorCode());
+        }
+        finally {
+            stmt.close();
+        }
+    }
+    
+    @Test 
+    public void testDivideByZeroExpressionIndex() throws Exception {
+        String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)";
+        Connection conn = DriverManager.getConnection(getUrl());
+        Statement stmt = null;
+        try {
+            stmt = conn.createStatement();
+            stmt.execute(ddl);
+            stmt.execute("CREATE INDEX i ON t (k1/0)");
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.DIVIDE_BY_ZERO.getErrorCode(), e.getErrorCode());
+        }
+        finally {
+            stmt.close();
+        }
+    }
 
 }


[31/50] [abbrv] phoenix git commit: Fix for TableNotFoundException when optimizing SELECT * FROM a tenant-specific table

Posted by ma...@apache.org.
Fix for TableNotFoundException when optimizing SELECT * FROM a tenant-specific table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d4f7b71a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d4f7b71a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d4f7b71a

Branch: refs/heads/calcite
Commit: d4f7b71a18859d201c3cab74acdff29702cddb76
Parents: f925a40
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Tue Feb 10 21:06:43 2015 -0800
Committer: Thomas <td...@salesforce.com>
Committed: Thu Feb 12 13:10:18 2015 -0800

----------------------------------------------------------------------
 .../index/GlobalIndexOptimizationIT.java        |  2 +-
 .../phoenix/compile/ProjectionCompiler.java     | 28 ++++++++++++++------
 2 files changed, 21 insertions(+), 9 deletions(-)
----------------------------------------------------------------------

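The scenario being fixed: a tenant-specific connection resolves its view under its tenant id, but the parent physical table sits in the metadata cache under the null tenant id, so projecting * against an index plan threw TableNotFoundException before the fallback below was added. A hedged reproduction sketch; the URL, tenant id, and names are illustrative:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.util.Properties;

    public class TenantSelectStarRepro {
        public static void main(String[] args) throws SQLException {
            Connection global = DriverManager.getConnection("jdbc:phoenix:localhost");
            global.createStatement().execute(
                "CREATE TABLE base (tenant_id VARCHAR NOT NULL, k VARCHAR NOT NULL, v VARCHAR "
                + "CONSTRAINT pk PRIMARY KEY (tenant_id, k)) MULTI_TENANT=true");
            global.createStatement().execute("CREATE INDEX i ON base(v)");
            global.close();

            Properties props = new Properties();
            props.setProperty("TenantId", "acme"); // PhoenixRuntime.TENANT_ID_ATTRIB
            Connection tenant = DriverManager.getConnection("jdbc:phoenix:localhost", props);
            tenant.createStatement().execute("CREATE VIEW acme_view AS SELECT * FROM base");
            // Before this fix, optimizing the wildcard against the index could throw
            // TableNotFoundException, since base is cached under the null tenant id.
            tenant.createStatement().executeQuery("SELECT * FROM acme_view WHERE v = 'a'").next();
            tenant.close();
        }
    }
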

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d4f7b71a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
index e4ba2c6..7fb879e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
@@ -105,7 +105,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
             conn1.commit();
             createIndex(TestUtil.DEFAULT_INDEX_TABLE_NAME, TestUtil.DEFAULT_DATA_TABLE_NAME, "v1");
             
-            String query = "SELECT /*+ INDEX(" + TestUtil.DEFAULT_DATA_TABLE_NAME + " " + TestUtil.DEFAULT_INDEX_TABLE_NAME + ")*/ t_id, k1, k2, k3, V1 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME +" where v1='a'";
+            String query = "SELECT /*+ INDEX(" + TestUtil.DEFAULT_DATA_TABLE_NAME + " " + TestUtil.DEFAULT_INDEX_TABLE_NAME + ")*/ * FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME +" where v1='a'";
             ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ query);
             
             String expected = 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d4f7b71a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index 6b518b9..27fe0f9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -74,6 +74,7 @@ import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.ViewType;
 import org.apache.phoenix.schema.PTableKey;
@@ -170,24 +171,35 @@ public class ProjectionCompiler {
         PhoenixConnection conn = context.getConnection();
         PName tenantId = conn.getTenantId();
         String tableName = index.getParentName().getString();
-        PTable table = conn.getMetaDataCache().getTable(new PTableKey(tenantId, tableName));
-        int tableOffset = table.getBucketNum() == null ? 0 : 1;
-        int minTablePKOffset = getMinPKOffset(table, tenantId);
+        PTable dataTable = null;
+        try {
+        	dataTable = conn.getMetaDataCache().getTable(new PTableKey(tenantId, tableName));
+        } catch (TableNotFoundException e) {
+            if (tenantId != null) { 
+            	// Check with null tenantId 
+            	dataTable = conn.getMetaDataCache().getTable(new PTableKey(null, tableName));
+            }
+            else {
+            	throw e;
+            }
+        }
+        int tableOffset = dataTable.getBucketNum() == null ? 0 : 1;
+        int minTablePKOffset = getMinPKOffset(dataTable, tenantId);
         int minIndexPKOffset = getMinPKOffset(index, tenantId);
         if (index.getIndexType() != IndexType.LOCAL) {
-            if (index.getColumns().size()-minIndexPKOffset != table.getColumns().size()-minTablePKOffset) {
+            if (index.getColumns().size()-minIndexPKOffset != dataTable.getColumns().size()-minTablePKOffset) {
                 // We'll end up not using this by the optimizer, so just throw
                 throw new ColumnNotFoundException(WildcardParseNode.INSTANCE.toString());
             }
         }
-        for (int i = tableOffset, j = tableOffset; i < table.getColumns().size(); i++) {
-            PColumn column = table.getColumns().get(i);
+        for (int i = tableOffset, j = tableOffset; i < dataTable.getColumns().size(); i++) {
+            PColumn column = dataTable.getColumns().get(i);
             // Skip tenant ID column (which may not be the first column, but is the first PK column)
             if (SchemaUtil.isPKColumn(column) && j++ < minTablePKOffset) {
                 tableOffset++;
                 continue;
             }
-            PColumn tableColumn = table.getColumns().get(i);
+            PColumn tableColumn = dataTable.getColumns().get(i);
             String indexColName = IndexUtil.getIndexColumnName(tableColumn);
             PColumn indexColumn = null;
             ColumnRef ref = null;
@@ -221,7 +233,7 @@ public class ProjectionCompiler {
             // appear as a column in an index
             projectedExpressions.add(expression);
             boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
-            ExpressionProjector projector = new ExpressionProjector(colName, tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive);
+            ExpressionProjector projector = new ExpressionProjector(colName, tableRef.getTableAlias() == null ? dataTable.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive);
             projectedColumns.add(projector);
         }
     }


[18/50] [abbrv] phoenix git commit: PHOENIX-1644 Check for min HBase version before creating local index and provide means of disabling usage

Posted by ma...@apache.org.
PHOENIX-1644 Check for min HBase version before creating local index and provide means of disabling usage


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2b8e6634
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2b8e6634
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2b8e6634

Branch: refs/heads/calcite
Commit: 2b8e66346d2c890ff664c9f2f826acb77a7ac950
Parents: 47ca595
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Feb 6 18:26:57 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri Feb 6 18:26:57 2015 -0800

----------------------------------------------------------------------
 .../phoenix/exception/SQLExceptionCode.java     |  3 ++-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |  3 ++-
 .../phoenix/query/ConnectionQueryServices.java  |  2 +-
 .../query/ConnectionQueryServicesImpl.java      | 24 ++++++++++++++++----
 .../query/ConnectionlessQueryServicesImpl.java  |  2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  3 ++-
 6 files changed, 28 insertions(+), 9 deletions(-)
----------------------------------------------------------------------

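From the client's point of view, the new gate means CREATE LOCAL INDEX can fail fast with a dedicated error code instead of misbehaving on an unsupported HBase release. A hedged sketch of handling that, using the codes from the diff below (1054 = NO_LOCAL_INDEXES, 1055 = UNALLOWED_LOCAL_INDEXES) and an illustrative connection URL:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class LocalIndexGuard {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try {
                conn.createStatement().execute(
                    "CREATE TABLE t (k VARCHAR PRIMARY KEY, v VARCHAR)");
                conn.createStatement().execute("CREATE LOCAL INDEX li ON t (v)");
            } catch (SQLException e) {
                if (e.getErrorCode() == 1054 || e.getErrorCode() == 1055) {
                    // HBase version in the disallowed range, or disabled by configuration
                    System.err.println("Local indexes unavailable: " + e.getMessage());
                } else {
                    throw e;
                }
            } finally {
                conn.close();
            }
        }
    }
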

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2b8e6634/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 19e7cdf..b2ca979 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -236,7 +236,8 @@ public enum SQLExceptionCode {
     CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED(1052, "43A09", "Property cannot be specified for a column family that is not being added or modified"),
     CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN(1053, "43A10", "Table level property cannot be set when adding a column"),
     
-    NO_LOCAL_INDEXES(1054, "43A11", "Local secondary indexes are only supported for HBase version " + MetaDataUtil.decodeHBaseVersionAsString(PhoenixDatabaseMetaData.LOCAL_SI_VERSION_THRESHOLD) + " and above."),
+    NO_LOCAL_INDEXES(1054, "43A11", "Local secondary indexes are not supported for HBase versions " + 
+        MetaDataUtil.decodeHBaseVersionAsString(PhoenixDatabaseMetaData.MIN_LOCAL_SI_VERSION_DISALLOW) + " through " + MetaDataUtil.decodeHBaseVersionAsString(PhoenixDatabaseMetaData.MAX_LOCAL_SI_VERSION_DISALLOW) + " inclusive."),
     UNALLOWED_LOCAL_INDEXES(1055, "43A12", "Local secondary indexes are configured to not be allowed."),
 
     /** Sequence related */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2b8e6634/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 7ac2bb6..034c40a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -269,12 +269,13 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
 
     private final PhoenixConnection connection;
     private final ResultSet emptyResultSet;
+    public static final int MAX_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "8");
+    public static final int MIN_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "6");
 
     // Version below which we should turn off essential column family.
     public static final int ESSENTIAL_FAMILY_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "7");
     // Version below which we should disallow usage of mutable secondary indexing.
     public static final int MUTABLE_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "10");
-    public static final int LOCAL_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "98", "9");
     /** Version below which we fall back on the generic KeyValueBuilder */
     public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = VersionUtil.encodeVersion("0", "94", "14");
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2b8e6634/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index fa44835..09705c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -101,7 +101,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
      */
     public KeyValueBuilder getKeyValueBuilder();
     
-    public enum Feature {};
+    public enum Feature {LOCAL_INDEX};
     public boolean supportsFeature(Feature feature);
     
     public String getUserName();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2b8e6634/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 7763a0a..4a9eac0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -156,6 +156,7 @@ import com.google.common.base.Stopwatch;
 import com.google.common.base.Throwables;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -200,6 +201,19 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     // setting this member variable guarded by "connectionCountLock"
     private volatile ConcurrentMap<SequenceKey,Sequence> sequenceMap = Maps.newConcurrentMap();
     private KeyValueBuilder kvBuilder;
+
+    private static interface FeatureSupported {
+        boolean isSupported(ConnectionQueryServices services);
+    }
+    
+    private final Map<Feature, FeatureSupported> featureMap = ImmutableMap.<Feature, FeatureSupported>of(
+            Feature.LOCAL_INDEX, new FeatureSupported(){
+                @Override
+                public boolean isSupported(ConnectionQueryServices services) {
+                    int hbaseVersion = services.getLowestClusterHBaseVersion();
+                    return hbaseVersion < PhoenixDatabaseMetaData.MIN_LOCAL_SI_VERSION_DISALLOW || hbaseVersion > PhoenixDatabaseMetaData.MAX_LOCAL_SI_VERSION_DISALLOW;
+                }
+            });
     
     private PMetaData newEmptyMetaData() {
         long maxSizeBytes = props.getLong(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
@@ -1074,7 +1088,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         // If we're not allowing local indexes or the hbase version is too low,
         // don't create the local index table
         if (   !this.getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX) 
-            || getLowestClusterHBaseVersion() < PhoenixDatabaseMetaData.LOCAL_SI_VERSION_THRESHOLD) {
+            || !this.supportsFeature(Feature.LOCAL_INDEX)) {
                     return;
         }
         
@@ -2428,9 +2442,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
     @Override
     public boolean supportsFeature(Feature feature) {
-        // TODO: Keep map of Feature -> min HBase version
-        // For now, only Feature is REVERSE_SCAN and it's not supported in any version yet
-        return false;
+        FeatureSupported supported = featureMap.get(feature);
+        if (supported == null) {
+            return false;
+        }
+        return supported.isSupported(this);
     }
 
     @Override
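
The range check in the featureMap above works because VersionUtil encodes a version as a single int whose numeric ordering matches version ordering. A minimal sketch of that idea (the bit widths here are illustrative assumptions, not necessarily VersionUtil's actual layout):

    // Illustrative only: pack major/minor/patch so that integer order
    // equals version order. Phoenix's VersionUtil uses the same idea;
    // the exact shift amounts here are assumed for the example.
    static int encodeVersion(int major, int minor, int patch) {
        return (major << 16) | (minor << 8) | patch;
    }

    // With min = encodeVersion(0, 98, 6) and max = encodeVersion(0, 98, 8),
    // this predicate disallows 0.98.6 through 0.98.8 inclusive, exactly as
    // the LOCAL_INDEX FeatureSupported above does:
    static boolean localIndexSupported(int hbaseVersion, int min, int max) {
        return hbaseVersion < min || hbaseVersion > max;
    }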

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2b8e6634/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 9efbf93..742c38e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -457,7 +457,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
 
     @Override
     public boolean supportsFeature(Feature feature) {
-        return false;
+        return true;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2b8e6634/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index effdb54..fceb724 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -144,6 +144,7 @@ import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.parse.PrimaryKeyConstraint;
 import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.parse.UpdateStatisticsStatement;
+import org.apache.phoenix.query.ConnectionQueryServices.Feature;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
@@ -915,7 +916,7 @@ public class MetaDataClient {
             if (!connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) {
                 throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
             }
-            if (hbaseVersion < PhoenixDatabaseMetaData.LOCAL_SI_VERSION_THRESHOLD) {
+            if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) {
                 throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
             }
         }
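
Net effect of the commit: every local-index decision now funnels through supportsFeature(Feature.LOCAL_INDEX). A condensed sketch of the resulting check in MetaDataClient.createIndex, using only calls that appear in the diffs above (surrounding control flow simplified):

    // Sketch: gate local index creation on both configuration and
    // cluster feature support before proceeding.
    if (!connection.getQueryServices().getProps().getBoolean(
            QueryServices.ALLOW_LOCAL_INDEX_ATTRIB,
            QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES)
                .setTableName(indexTableName.getTableName()).build().buildException();
    }
    if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES)
                .setTableName(indexTableName.getTableName()).build().buildException();
    }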


[28/50] [abbrv] phoenix git commit: PHOENIX-1650 Possible resource leak in upsert select query (Rajeshbabu)

Posted by ma...@apache.org.
PHOENIX-1650 Possible resource leak in upsert select query (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e0a81a09
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e0a81a09
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e0a81a09

Branch: refs/heads/calcite
Commit: e0a81a09ec7aeab44a2bce597d19850e7c6a7846
Parents: 7dc3d84
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Feb 10 16:51:10 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Feb 10 16:53:26 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/compile/UpsertCompiler.java  | 26 +++++++++++---------
 1 file changed, 15 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0a81a09/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index 2ac075e..b21cc2f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -670,18 +670,22 @@ public class UpsertCompiler {
                     if (parallelIteratorFactory == null) {
                         return upsertSelect(statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
                     }
-                    parallelIteratorFactory.setRowProjector(projector);
-                    parallelIteratorFactory.setColumnIndexes(columnIndexes);
-                    parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
-                    Tuple tuple;
-                    long totalRowCount = 0;
-                    while ((tuple=iterator.next()) != null) {// Runs query
-                        Cell kv = tuple.getValue(0);
-                        totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
+                    try {
+                        parallelIteratorFactory.setRowProjector(projector);
+                        parallelIteratorFactory.setColumnIndexes(columnIndexes);
+                        parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
+                        Tuple tuple;
+                        long totalRowCount = 0;
+                        while ((tuple=iterator.next()) != null) {// Runs query
+                            Cell kv = tuple.getValue(0);
+                            totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
+                        }
+                        // Return total number of rows that have been updated. In the case of auto commit being off
+                        // the mutations will all be in the mutation state of the current connection.
+                        return new MutationState(maxSize, statement.getConnection(), totalRowCount);
+                    } finally {
+                        iterator.close();
                     }
-                    // Return total number of rows that have been updated. In the case of auto commit being off
-                    // the mutations will all be in the mutation state of the current connection.
-                    return new MutationState(maxSize, statement.getConnection(), totalRowCount);
                 }
 
                 @Override
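
The fix is the standard guarantee-close idiom: obtain the iterator, do all consumption inside try, and release it in finally, so the underlying scanner is freed even when decoding a row throws. The same shape in miniature (plan and process() stand in for the surrounding compiler plumbing and are assumptions of the sketch):

    // Consume in try, close in finally: an exception mid-iteration can
    // no longer leak the iterator's underlying resources.
    ResultIterator iterator = plan.iterator();
    try {
        Tuple tuple;
        while ((tuple = iterator.next()) != null) {
            process(tuple); // hypothetical row consumer
        }
    } finally {
        iterator.close();
    }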


[50/50] [abbrv] phoenix git commit: Fix compilation errors and ToExpressionTest after merge

Posted by ma...@apache.org.
Fix compilation errors and ToExpressionTest after merge


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/026f60b1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/026f60b1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/026f60b1

Branch: refs/heads/calcite
Commit: 026f60b12764e68f8e60922c276a44c6543f27ce
Parents: b9d24a9
Author: maryannxue <we...@intel.com>
Authored: Wed Mar 4 17:40:22 2015 -0500
Committer: maryannxue <we...@intel.com>
Committed: Wed Mar 4 17:40:22 2015 -0500

----------------------------------------------------------------------
 .../apache/phoenix/calcite/PhoenixTable.java    |  2 +-
 .../calcite/PhoenixToEnumerableConverter.java   |  2 +-
 .../apache/phoenix/compile/FromCompiler.java    |  3 +--
 .../phoenix/calcite/ToExpressionTest.java       | 25 ++++----------------
 4 files changed, 7 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/026f60b1/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java
index 9b58d68..730f642 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java
@@ -14,8 +14,8 @@ import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.types.PDataType;
 
 /**
  * Implementation of Calcite {@link org.apache.calcite.schema.Table} SPI for

http://git-wip-us.apache.org/repos/asf/phoenix/blob/026f60b1/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixToEnumerableConverter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixToEnumerableConverter.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixToEnumerableConverter.java
index fc2af9c..e8949d8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixToEnumerableConverter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixToEnumerableConverter.java
@@ -62,7 +62,7 @@ public class PhoenixToEnumerableConverter extends ConverterImpl implements Enume
     }
 
     static Expression stash(EnumerableRelImplementor implementor, Object o, Class clazz) {
-        ParameterExpression x = implementor.register(o, clazz);
+        ParameterExpression x = (ParameterExpression) implementor.stash(o, clazz);
         MethodCallExpression e =
             Expressions.call(implementor.getRootExpression(),
                 org.apache.calcite.util.BuiltInMethod.DATA_CONTEXT_GET.method,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/026f60b1/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 64024c4..977a3f8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -26,7 +26,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -327,7 +326,7 @@ public class FromCompiler {
         private final int tsAddition;
 
         private BaseColumnResolver(PhoenixConnection connection, int tsAddition) {
-            this.connection = Preconditions.checkNotNull(connection);
+            this.connection = connection;
             this.client = connection == null ? null : new MetaDataClient(connection);
             this.tsAddition = tsAddition;
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/026f60b1/phoenix-core/src/test/java/org/apache/phoenix/calcite/ToExpressionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/calcite/ToExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/calcite/ToExpressionTest.java
index 91628ff..50ac2a8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/calcite/ToExpressionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/calcite/ToExpressionTest.java
@@ -1,41 +1,30 @@
 package org.apache.phoenix.calcite;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.SQLException;
 import java.util.Collections;
-import java.util.Set;
 
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.phoenix.calcite.PhoenixRel.Implementor;
 import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.FromCompiler;
-import org.apache.phoenix.compile.GroupByCompiler;
-import org.apache.phoenix.compile.HavingCompiler;
-import org.apache.phoenix.compile.LimitCompiler;
-import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.compile.WhereCompiler;
-import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
-import org.apache.phoenix.expression.ColumnExpression;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.parse.SubqueryParseNode;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
-import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
 import org.junit.Test;
@@ -43,13 +32,6 @@ import org.junit.Test;
 
 public class ToExpressionTest extends BaseConnectionlessQueryTest {
 	
-	private static Expression compileExpression(PhoenixStatement statement, StatementContext context, String selectStmt) throws SQLException {
-		// Re-parse the WHERE clause as we don't store it any where
-        SelectStatement select = new SQLParser(selectStmt).parseQuery();
-        Expression where = WhereCompiler.compile(context, select, null, Collections.<SubqueryParseNode>emptySet());
-        return where;
-	}
-	
 	@Test
 	public void toExpressionTest() throws Exception {
 		final String expectedColName = "K2";
@@ -59,8 +41,9 @@ public class ToExpressionTest extends BaseConnectionlessQueryTest {
 		final PTable table = conn.unwrap(PhoenixConnection.class).getMetaDataCache().getTable(new PTableKey(null,"T"));
 		PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
 		String query = "SELECT * FROM T WHERE K2 = 'foo'";
-		QueryPlan plan = stmt.compileQuery(query);
-		Expression where = compileExpression(stmt, plan.getContext(), query);
+        SelectStatement select = new SQLParser(query).parseQuery();
+        ColumnResolver resolver = FromCompiler.getResolverForQuery(select, conn.unwrap(PhoenixConnection.class));
+        Expression where = WhereCompiler.compile(new StatementContext(stmt, resolver), select, null, Collections.<SubqueryParseNode>emptySet());
 		
 		JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl();
 		RexBuilder builder = new RexBuilder(typeFactory);
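
Two details make the rewritten test work: FromCompiler no longer insists on a non-null connection (the Preconditions check was removed, and the null case was already handled on the next line), and the WHERE clause is compiled directly rather than through stmt.compileQuery(). The essential calls, isolated from the diff above:

    // Compile just the WHERE clause of a query, as the updated test does.
    SelectStatement select = new SQLParser(query).parseQuery();
    ColumnResolver resolver = FromCompiler.getResolverForQuery(
            select, conn.unwrap(PhoenixConnection.class));
    Expression where = WhereCompiler.compile(
            new StatementContext(stmt, resolver), select, null,
            Collections.<SubqueryParseNode>emptySet());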


[22/50] [abbrv] phoenix git commit: PHOENIX-1646 Views and functional index expressions may lose information when stringified

Posted by ma...@apache.org.
PHOENIX-1646 Views and functional index expressions may lose information when stringified


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/abeaa74a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/abeaa74a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/abeaa74a

Branch: refs/heads/calcite
Commit: abeaa74ad35e145fcae40f239437e1b5964bcd72
Parents: 2d5913b
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Feb 9 16:36:34 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Feb 9 18:37:14 2015 -0800

----------------------------------------------------------------------
 .../java/org/apache/phoenix/end2end/ViewIT.java |  27 ++
 .../phoenix/compile/CreateTableCompiler.java    |   4 +-
 .../phoenix/compile/ExpressionCompiler.java     |  22 +-
 .../expression/ComparisonExpression.java        |  12 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |  37 +--
 .../org/apache/phoenix/parse/AddParseNode.java  |   6 +
 .../AggregateFunctionWithinGroupParseNode.java  |  52 +++
 .../org/apache/phoenix/parse/AliasedNode.java   |  35 ++
 .../org/apache/phoenix/parse/AndParseNode.java  |  14 +
 .../phoenix/parse/ArithmeticParseNode.java      |  15 +
 .../parse/ArrayAllAnyComparisonNode.java        |  49 +++
 .../phoenix/parse/ArrayAllComparisonNode.java   |   3 +-
 .../phoenix/parse/ArrayAnyComparisonNode.java   |   3 +-
 .../phoenix/parse/ArrayConstructorNode.java     |  17 +
 .../apache/phoenix/parse/ArrayElemRefNode.java  |  11 +
 .../apache/phoenix/parse/BetweenParseNode.java  |  18 +-
 .../org/apache/phoenix/parse/BindParseNode.java |  12 +-
 .../org/apache/phoenix/parse/BindTableNode.java |   8 +
 .../org/apache/phoenix/parse/CaseParseNode.java |  20 ++
 .../org/apache/phoenix/parse/CastParseNode.java |  58 ++--
 .../org/apache/phoenix/parse/ColumnDef.java     |  26 +-
 .../apache/phoenix/parse/ColumnParseNode.java   |  47 ++-
 .../phoenix/parse/ComparisonParseNode.java      |  10 +
 .../apache/phoenix/parse/CompoundParseNode.java |   5 -
 .../apache/phoenix/parse/ConcreteTableNode.java |  19 ++
 .../apache/phoenix/parse/DerivedTableNode.java  |  27 ++
 .../phoenix/parse/DistinctCountParseNode.java   |  16 +
 .../apache/phoenix/parse/DivideParseNode.java   |   7 +
 .../apache/phoenix/parse/ExistsParseNode.java   |   9 +
 .../phoenix/parse/FamilyWildcardParseNode.java  |   8 +
 .../apache/phoenix/parse/FunctionParseNode.java |  36 ++-
 .../java/org/apache/phoenix/parse/HintNode.java |  36 +++
 .../apache/phoenix/parse/InListParseNode.java   |  19 ++
 .../org/apache/phoenix/parse/InParseNode.java   |  11 +
 .../apache/phoenix/parse/IsNullParseNode.java   |  10 +
 .../org/apache/phoenix/parse/JoinTableNode.java |  51 +++
 .../org/apache/phoenix/parse/LikeParseNode.java |  12 +
 .../org/apache/phoenix/parse/LimitNode.java     |  29 ++
 .../apache/phoenix/parse/LiteralParseNode.java  |  28 +-
 .../apache/phoenix/parse/ModulusParseNode.java  |   6 +
 .../apache/phoenix/parse/MultiplyParseNode.java |   6 +
 .../org/apache/phoenix/parse/NamedNode.java     |   6 +-
 .../apache/phoenix/parse/NamedParseNode.java    |  17 +-
 .../apache/phoenix/parse/NamedTableNode.java    |  38 +++
 .../org/apache/phoenix/parse/NotParseNode.java  |   9 +
 .../org/apache/phoenix/parse/OrParseNode.java   |  15 +
 .../org/apache/phoenix/parse/OrderByNode.java   |  34 +-
 .../phoenix/parse/OuterJoinParseNode.java       |  47 ---
 .../org/apache/phoenix/parse/ParseNode.java     |  11 +
 .../apache/phoenix/parse/ParseNodeFactory.java  |  34 +-
 .../parse/RowValueConstructorParseNode.java     |  16 +
 .../apache/phoenix/parse/SelectStatement.java   |  99 ++++++
 .../phoenix/parse/SequenceValueParseNode.java   |  10 +
 .../phoenix/parse/StringConcatParseNode.java    |  14 +
 .../apache/phoenix/parse/SubqueryParseNode.java |   8 +
 .../apache/phoenix/parse/SubtractParseNode.java |   7 +
 .../org/apache/phoenix/parse/TableName.java     |   4 +-
 .../org/apache/phoenix/parse/TableNode.java     |  10 +
 .../phoenix/parse/TableWildcardParseNode.java   |   7 +
 .../apache/phoenix/parse/WildcardParseNode.java |  16 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  10 +-
 .../org/apache/phoenix/schema/types/PDate.java  |   5 +-
 .../apache/phoenix/schema/types/PVarchar.java   |   3 +-
 .../java/org/apache/phoenix/util/IndexUtil.java |   7 +-
 .../java/org/apache/phoenix/util/QueryUtil.java |  19 +-
 .../org/apache/phoenix/util/StringUtil.java     |   5 +
 .../phoenix/compile/WhereCompilerTest.java      |  84 ++---
 .../phoenix/compile/WhereOptimizerTest.java     |   3 +-
 .../apache/phoenix/parse/QueryParserTest.java   | 318 +++++++++++--------
 .../query/BaseConnectionlessQueryTest.java      |  23 ++
 .../phoenix/schema/types/PDataTypeTest.java     |   7 +
 71 files changed, 1357 insertions(+), 370 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 9a89531..db1e58f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -272,6 +272,33 @@ public class ViewIT extends BaseViewIT {
     }
     
     @Test
+    public void testViewWithCurrentDate() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 DATE)";
+        conn.createStatement().execute(ddl);
+        ddl = "CREATE VIEW v (v VARCHAR) AS SELECT * FROM t WHERE v2 > CURRENT_DATE()-5 AND v2 > DATE '2010-01-01'";
+        conn.createStatement().execute(ddl);
+        try {
+            conn.createStatement().execute("UPSERT INTO v VALUES(1)");
+            fail();
+        } catch (ReadOnlyTableException e) {
+            
+        }
+        for (int i = 0; i < 10; i++) {
+            conn.createStatement().execute("UPSERT INTO t VALUES(" + i + ", " + (i+10) + ",CURRENT_DATE()-" + i + ")");
+        }
+        conn.commit();
+        
+        int count = 0;
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM v");
+        while (rs.next()) {
+            assertEquals(count, rs.getInt(1));
+            count++;
+        }
+        assertEquals(5, count);
+    }
+
+    @Test
     public void testViewAndTableInDifferentSchemas() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
         String ddl = "CREATE TABLE s1.t (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
index a95cd86..edee788 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
@@ -106,7 +106,9 @@ public class CreateTableCompiler {
                 Expression where = whereNode.accept(expressionCompiler);
                 if (where != null && !LiteralExpression.isTrue(where)) {
                     TableName baseTableName = create.getBaseTableName();
-                    viewStatementToBe = QueryUtil.getViewStatement(baseTableName.getSchemaName(), baseTableName.getTableName(), where);
+                    StringBuilder buf = new StringBuilder();
+                    whereNode.toSQL(resolver, buf);
+                    viewStatementToBe = QueryUtil.getViewStatement(baseTableName.getSchemaName(), baseTableName.getTableName(), buf.toString());
                 }
                 if (viewTypeToBe != ViewType.MAPPED) {
                     Long scn = connection.getSCN();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 97818e6..81e4059 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -66,6 +66,8 @@ import org.apache.phoenix.expression.TimestampSubtractExpression;
 import org.apache.phoenix.expression.function.ArrayAllComparisonExpression;
 import org.apache.phoenix.expression.function.ArrayAnyComparisonExpression;
 import org.apache.phoenix.expression.function.ArrayElemRefExpression;
+import org.apache.phoenix.expression.function.RoundDecimalExpression;
+import org.apache.phoenix.expression.function.RoundTimestampExpression;
 import org.apache.phoenix.parse.AddParseNode;
 import org.apache.phoenix.parse.AndParseNode;
 import org.apache.phoenix.parse.ArithmeticParseNode;
@@ -534,6 +536,24 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
         return true;
     }
 
+    // TODO: don't repeat this ugly cast logic (maybe use isCastable in the last else block).

+    private static Expression convertToRoundExpressionIfNeeded(PDataType fromDataType, PDataType targetDataType, List<Expression> expressions) throws SQLException {
+        Expression firstChildExpr = expressions.get(0);
+        if(fromDataType == targetDataType) {
+            return firstChildExpr;
+        } else if((fromDataType == PDecimal.INSTANCE || fromDataType == PTimestamp.INSTANCE || fromDataType == PUnsignedTimestamp.INSTANCE) && targetDataType.isCoercibleTo(
+          PLong.INSTANCE)) {
+            return RoundDecimalExpression.create(expressions);
+        } else if((fromDataType == PDecimal.INSTANCE || fromDataType == PTimestamp.INSTANCE || fromDataType == PUnsignedTimestamp.INSTANCE) && targetDataType.isCoercibleTo(
+          PDate.INSTANCE)) {
+            return RoundTimestampExpression.create(expressions);
+        } else if(fromDataType.isCastableTo(targetDataType)) {
+            return firstChildExpr;
+        } else {
+            throw TypeMismatchException.newException(fromDataType, targetDataType, firstChildExpr.toString());
+        }
+    }
+
     @Override
     public Expression visitLeave(CastParseNode node, List<Expression> children) throws SQLException {
         ParseNode childNode = node.getChildren().get(0);
@@ -553,7 +573,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
              * end up creating a RoundExpression. 
              */
             if (context.getResolver().getTables().get(0).getTable().getType() != PTableType.INDEX) {
-                expr =  CastParseNode.convertToRoundExpressionIfNeeded(fromDataType, targetDataType, children);
+                expr =  convertToRoundExpressionIfNeeded(fromDataType, targetDataType, children);
             }
         }
         return wrapGroupByExpression(CoerceExpression.create(expr, targetDataType, SortOrder.getDefault(), expr.getMaxLength()));  

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java
index 4bfa0e9..b9190e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java
@@ -44,6 +44,7 @@ import org.apache.phoenix.schema.types.PUnsignedInt;
 import org.apache.phoenix.schema.types.PUnsignedLong;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.ExpressionUtil;
+import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.StringUtil;
 
 import com.google.common.collect.Lists;
@@ -57,15 +58,6 @@ import com.google.common.collect.Lists;
  */
 public class ComparisonExpression extends BaseCompoundExpression {
     private CompareOp op;
-    private static final String[] CompareOpString = new String[CompareOp.values().length];
-    static {
-        CompareOpString[CompareOp.EQUAL.ordinal()] = " = ";
-        CompareOpString[CompareOp.NOT_EQUAL.ordinal()] = " != ";
-        CompareOpString[CompareOp.GREATER.ordinal()] = " > ";
-        CompareOpString[CompareOp.LESS.ordinal()] = " < ";
-        CompareOpString[CompareOp.GREATER_OR_EQUAL.ordinal()] = " >= ";
-        CompareOpString[CompareOp.LESS_OR_EQUAL.ordinal()] = " <= ";
-    }
     
     private static void addEqualityExpression(Expression lhs, Expression rhs, List<Expression> andNodes, ImmutableBytesWritable ptr) throws SQLException {
         boolean isLHSNull = ExpressionUtil.isNull(lhs, ptr);
@@ -370,7 +362,7 @@ public class ComparisonExpression extends BaseCompoundExpression {
     }
     
     public static String toString(CompareOp op, List<Expression> children) {
-        return (children.get(0) + CompareOpString[op.ordinal()] + children.get(1));
+        return (children.get(0) + " " + QueryUtil.toSQL(op) + " " + children.get(1));
     }
     
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 034c40a..154fef7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -28,7 +28,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Result;
@@ -366,10 +365,6 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
         return emptyResultSet;
     }
 
-    private static String escapePattern(String pattern) {
-        return StringEscapeUtils.escapeSql(pattern); // Need to escape double quotes
-    }
-
     public static final String GLOBAL_TENANANTS_ONLY = "null";
 
     private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern) {
@@ -378,16 +373,16 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
             if (tenantId != null) {
                 appendConjunction(buf);
                 buf.append(" (" + TENANT_ID + " IS NULL " +
-                        " OR " + TENANT_ID + " = '" + escapePattern(tenantId.getString()) + "') ");
+                        " OR " + TENANT_ID + " = '" + StringUtil.escapeStringConstant(tenantId.getString()) + "') ");
             }
         } else if (tenantIdPattern.length() == 0) {
                 appendConjunction(buf);
                 buf.append(TENANT_ID + " IS NULL ");
         } else {
             appendConjunction(buf);
-            buf.append(" TENANT_ID LIKE '" + escapePattern(tenantIdPattern) + "' ");
+            buf.append(" TENANT_ID LIKE '" + StringUtil.escapeStringConstant(tenantIdPattern) + "' ");
             if (tenantId != null) {
-                buf.append(" and TENANT_ID + = '" + escapePattern(tenantId.getString()) + "' ");
+                buf.append(" and TENANT_ID + = '" + StringUtil.escapeStringConstant(tenantId.getString()) + "' ");
             }
         }
     }
@@ -433,11 +428,11 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
         addTenantIdFilter(where, catalog);
         if (schemaPattern != null) {
             appendConjunction(where);
-            where.append(TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + escapePattern(schemaPattern) + "'" ));
+            where.append(TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + StringUtil.escapeStringConstant(schemaPattern) + "'" ));
         }
         if (tableNamePattern != null && tableNamePattern.length() > 0) {
             appendConjunction(where);
-            where.append(TABLE_NAME + " like '" + escapePattern(tableNamePattern) + "'" );
+            where.append(TABLE_NAME + " like '" + StringUtil.escapeStringConstant(tableNamePattern) + "'" );
         }
         // Allow a "." in columnNamePattern for column family match
         String colPattern = null;
@@ -455,11 +450,11 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
             if (cfPattern != null && cfPattern.length() > 0) { // if null or empty, will pick up all columns
                 // Will pick up only KV columns
                 appendConjunction(where);
-                where.append(COLUMN_FAMILY + " like '" + escapePattern(cfPattern) + "'" );
+                where.append(COLUMN_FAMILY + " like '" + StringUtil.escapeStringConstant(cfPattern) + "'" );
             }
             if (colPattern != null && colPattern.length() > 0) {
                 appendConjunction(where);
-                where.append(COLUMN_NAME + " like '" + escapePattern(colPattern) + "'" );
+                where.append(COLUMN_NAME + " like '" + StringUtil.escapeStringConstant(colPattern) + "'" );
             }
         }
         if (colPattern == null) {
@@ -680,8 +675,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 ARRAY_SIZE +
                 "\nfrom " + SYSTEM_CATALOG +
                 "\nwhere ");
-        buf.append(TABLE_SCHEM + (schema == null || schema.length() == 0 ? " is null" : " = '" + escapePattern(schema) + "'" ));
-        buf.append("\nand " + DATA_TABLE_NAME + " = '" + escapePattern(table) + "'" );
+        buf.append(TABLE_SCHEM + (schema == null || schema.length() == 0 ? " is null" : " = '" + StringUtil.escapeStringConstant(schema) + "'" ));
+        buf.append("\nand " + DATA_TABLE_NAME + " = '" + StringUtil.escapeStringConstant(table) + "'" );
         buf.append("\nand " + COLUMN_NAME + " is not null" );
         addTenantIdFilter(buf, catalog);
         buf.append("\norder by INDEX_NAME," + ORDINAL_POSITION);
@@ -825,8 +820,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 VIEW_CONSTANT +
                 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
                 " where ");
-        buf.append(TABLE_SCHEM + (schema == null || schema.length() == 0 ? " is null" : " = '" + escapePattern(schema) + "'" ));
-        buf.append(" and " + TABLE_NAME + " = '" + escapePattern(table) + "'" );
+        buf.append(TABLE_SCHEM + (schema == null || schema.length() == 0 ? " is null" : " = '" + StringUtil.escapeStringConstant(schema) + "'" ));
+        buf.append(" and " + TABLE_NAME + " = '" + StringUtil.escapeStringConstant(table) + "'" );
         buf.append(" and " + COLUMN_NAME + " is not null");
         buf.append(" and " + COLUMN_FAMILY + " is null");
         addTenantIdFilter(buf, catalog);
@@ -891,7 +886,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 " where " + COLUMN_NAME + " is null");
         this.addTenantIdFilter(buf, catalog);
         if (schemaPattern != null) {
-            buf.append(" and " + TABLE_SCHEM + " like '" + escapePattern(schemaPattern) + "'");
+            buf.append(" and " + TABLE_SCHEM + " like '" + StringUtil.escapeStringConstant(schemaPattern) + "'");
         }
         Statement stmt = connection.createStatement();
         return stmt.executeQuery(buf.toString());
@@ -919,10 +914,10 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 " and " + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue());
         addTenantIdFilter(buf, catalog);
         if (schemaPattern != null) {
-            buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + escapePattern(schemaPattern) + "'" ));
+            buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + StringUtil.escapeStringConstant(schemaPattern) + "'" ));
         }
         if (tableNamePattern != null) {
-            buf.append(" and " + TABLE_NAME + " like '" + escapePattern(tableNamePattern) + "'" );
+            buf.append(" and " + TABLE_NAME + " like '" + StringUtil.escapeStringConstant(tableNamePattern) + "'" );
         }
         buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," +TABLE_NAME + "," + SUPERTABLE_NAME);
         Statement stmt = connection.createStatement();
@@ -1017,10 +1012,10 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 " and " + COLUMN_FAMILY + " is null");
         addTenantIdFilter(buf, catalog);
         if (schemaPattern != null) {
-            buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + escapePattern(schemaPattern) + "'" ));
+            buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + StringUtil.escapeStringConstant(schemaPattern) + "'" ));
         }
         if (tableNamePattern != null) {
-            buf.append(" and " + TABLE_NAME + " like '" + escapePattern(tableNamePattern) + "'" );
+            buf.append(" and " + TABLE_NAME + " like '" + StringUtil.escapeStringConstant(tableNamePattern) + "'" );
         }
         if (types != null && types.length > 0) {
             buf.append(" and " + TABLE_TYPE + " IN (");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/AddParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/AddParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/AddParseNode.java
index f855ada..fa04a55 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/AddParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/AddParseNode.java
@@ -31,7 +31,13 @@ import java.util.List;
  * @since 0.1
  */
 public class AddParseNode extends ArithmeticParseNode {
+    public static final String OPERATOR = "+";
 
+    @Override
+    public String getOperator() {
+        return OPERATOR;
+    }
+    
     AddParseNode(List<ParseNode> children) {
         super(children);
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/AggregateFunctionWithinGroupParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/AggregateFunctionWithinGroupParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/AggregateFunctionWithinGroupParseNode.java
new file mode 100644
index 0000000..5c32908
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/AggregateFunctionWithinGroupParseNode.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.util.List;
+
+import org.apache.phoenix.compile.ColumnResolver;
+
+public class AggregateFunctionWithinGroupParseNode extends AggregateFunctionParseNode {
+
+    public AggregateFunctionWithinGroupParseNode(String name, List<ParseNode> children, BuiltInFunctionInfo info) {
+        super(name, children, info);
+    }
+
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(' ');
+        buf.append(getName());
+        buf.append('(');
+        List<ParseNode> children = getChildren();
+        List<ParseNode> args = children.subList(2, children.size());
+        if (!args.isEmpty()) {
+            for (ParseNode child : args) {
+                child.toSQL(resolver, buf);
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+        }
+        buf.append(')');
+        
+        buf.append(" WITHIN GROUP (ORDER BY ");
+        children.get(0).toSQL(resolver, buf);
+        buf.append(" " + (LiteralParseNode.TRUE.equals(children.get(1)) ? "ASC" : "DESC"));
+        buf.append(')');
+    }
+}
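
This toSQL reassembles the surface syntax from the stored child order: the ORDER BY expression is child 0, the ascending flag is child 1, and the remaining children are the function arguments. A hedged illustration of the output for a node named PERCENTILE_CONT whose children are [X, TRUE, 0.9] (names chosen for the example, not taken from a test):

    // Expected rendering for the hypothetical node described above:
    StringBuilder buf = new StringBuilder();
    node.toSQL(resolver, buf);
    // buf now holds " PERCENTILE_CONT(0.9) WITHIN GROUP (ORDER BY X ASC)"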

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/AliasedNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/AliasedNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/AliasedNode.java
index f5dec8d..807a01f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/AliasedNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/AliasedNode.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.parse;
 
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.util.SchemaUtil;
 
 /**
@@ -45,6 +46,40 @@ public class AliasedNode {
         return node;
     }
 
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        node.toSQL(resolver, buf);
+        if (alias != null) {
+            buf.append(' ');
+            if (isCaseSensitve) buf.append('"');
+            buf.append(alias);
+            if (isCaseSensitve) buf.append('"');
+        }
+    }
+    
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((alias == null) ? 0 : alias.hashCode());
+        result = prime * result + ((node == null) ? 0 : node.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        AliasedNode other = (AliasedNode)obj;
+        if (alias == null) {
+            if (other.alias != null) return false;
+        } else if (!alias.equals(other.alias)) return false;
+        if (node == null) {
+            if (other.node != null) return false;
+        } else if (!node.equals(other.node)) return false;
+        return true;
+    }
+
     public boolean isCaseSensitve() {
         return isCaseSensitve;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/AndParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/AndParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/AndParseNode.java
index e8c6138..3c333c4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/AndParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/AndParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 
@@ -32,6 +34,7 @@ import java.util.List;
  * @since 0.1
  */
 public class AndParseNode extends CompoundParseNode {
+    public static final String NAME = "AND";
 
     AndParseNode(List<ParseNode> children) {
         super(children);
@@ -46,4 +49,15 @@ public class AndParseNode extends CompoundParseNode {
         return visitor.visitLeave(this, l);
     }
     
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append('(');
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        for (int i = 1 ; i < children.size(); i++) {
+            buf.append(" " + NAME + " ");
+            children.get(i).toSQL(resolver, buf);
+        }
+        buf.append(')');
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java
index ca4b5f2..1a2f170 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java
@@ -19,10 +19,25 @@ package org.apache.phoenix.parse;
 
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 public abstract class ArithmeticParseNode extends CompoundParseNode {
 
     public ArithmeticParseNode(List<ParseNode> children) {
         super(children);
     }
 
+    public abstract String getOperator();
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append('(');
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        for (int i = 1 ; i < children.size(); i++) {
+            buf.append(" " + getOperator() + " ");
+            children.get(i).toSQL(resolver, buf);
+        }
+        buf.append(')');
+    }
 }
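
This is the broader pattern of the commit: every parse node prints itself via toSQL, so a view's WHERE clause or a functional index expression is regenerated from the parse tree rather than from Expression.toString(), which is where information was lost (see the CURRENT_DATE() view test above). A hedged sketch of the round trip, following the CreateTableCompiler change above:

    // Sketch: stringify the parse tree, not the compiled expression.
    StringBuilder buf = new StringBuilder();
    whereNode.toSQL(resolver, buf);
    // For "v2 > CURRENT_DATE() - 5" this yields something like
    // "(V2 > (CURRENT_DATE() - 5))"; the parentheses come from
    // ArithmeticParseNode.toSQL above, and exact casing may differ.
    String viewStatement = QueryUtil.getViewStatement(
            baseTableName.getSchemaName(), baseTableName.getTableName(),
            buf.toString());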

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllAnyComparisonNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllAnyComparisonNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllAnyComparisonNode.java
new file mode 100644
index 0000000..bdb50f9
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllAnyComparisonNode.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.phoenix.compile.ColumnResolver;
+import org.apache.phoenix.util.QueryUtil;
+
+public abstract class ArrayAllAnyComparisonNode extends CompoundParseNode {
+
+    public ArrayAllAnyComparisonNode(List<ParseNode> children) {
+        super(children);
+    }
+
+    public abstract String getType();
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        ParseNode rhs = children.get(0);
+        ComparisonParseNode comp = (ComparisonParseNode)children.get(1);
+        ParseNode lhs = comp.getLHS();
+        CompareOp op = comp.getFilterOp();
+        buf.append(' ');
+        lhs.toSQL(resolver, buf);
+        buf.append(" " + QueryUtil.toSQL(op) + " ");
+        buf.append(getType());
+        buf.append('(');
+        rhs.toSQL(resolver, buf);
+        buf.append(')');
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java
index b31b3ae..98371a5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java
@@ -25,12 +25,13 @@ import java.util.List;
  * The Expression a = ALL(b) where b is of type array is rewritten in this
  * node as ALL(a = b(n))
  */
-public class ArrayAllComparisonNode extends CompoundParseNode {
+public class ArrayAllComparisonNode extends ArrayAllAnyComparisonNode {
 
     ArrayAllComparisonNode(ParseNode rhs, ComparisonParseNode compareNode) {
         super(Arrays.<ParseNode>asList(rhs, compareNode));
     }
     
+    @Override
     public String getType() {
         return "ALL";
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java
index daca86d..a4662b5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java
@@ -25,12 +25,13 @@ import java.util.List;
  * The Expression a = ANY(b) where b is of type array is rewritten in this
  * node as ANY(a = b(n))
  */
-public class ArrayAnyComparisonNode extends CompoundParseNode {
+public class ArrayAnyComparisonNode extends ArrayAllAnyComparisonNode {
 
     ArrayAnyComparisonNode(ParseNode rhs, ComparisonParseNode compareNode) {
         super(Arrays.<ParseNode>asList(rhs, compareNode));
     }
     
+    @Override
     public String getType() {
         return "ANY";
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java
index a959ba7..9b6a6be 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java
@@ -21,6 +21,9 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+import org.apache.phoenix.schema.types.PArrayDataType;
+
 /**
  * Holds the list of array elements that will be used by the upsert stmt with ARRAY column 
  *
@@ -39,4 +42,18 @@ public class ArrayConstructorNode extends CompoundParseNode {
         }
         return visitor.visitLeave(this, l);
 	}
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(' ');
+        buf.append(PArrayDataType.ARRAY_TYPE_SUFFIX);
+        buf.append('[');
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        for (int i = 1 ; i < children.size(); i++) {
+            buf.append(',');
+            children.get(i).toSQL(resolver, buf);
+        }
+        buf.append(']');
+    }
 }
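
A sketch of the ARRAY[...] form built above, assuming ARRAY_TYPE_SUFFIX is the
keyword ARRAY and using hypothetical elements in place of the child nodes:

    public class ArrayConstructorSketch {
        public static void main(String[] args) {
            int[] elements = {1, 2, 3};
            StringBuilder buf = new StringBuilder(" ARRAY[");
            buf.append(elements[0]);
            for (int i = 1; i < elements.length; i++) {
                buf.append(',').append(elements[i]); // comma-separate the remaining elements
            }
            buf.append(']');
            System.out.println(buf); // prints:  ARRAY[1,2,3]
        }
    }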

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java
index da69de2..b3c4ad9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 public class ArrayElemRefNode extends CompoundParseNode {
 
     public ArrayElemRefNode(List<ParseNode> parseNode) {
@@ -35,4 +37,13 @@ public class ArrayElemRefNode extends CompoundParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        buf.append('[');
+        children.get(1).toSQL(resolver, buf);
+        buf.append(']');
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java
index 961af20..6d82117 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java
@@ -18,7 +18,11 @@
 package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.phoenix.compile.ColumnResolver;
 
 
 
@@ -71,4 +75,16 @@ public class BetweenParseNode extends CompoundParseNode {
 			return false;
 		return true;
 	}
+
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        if (negate) buf.append(" NOT");
+        buf.append(" BETWEEN ");
+        children.get(1).toSQL(resolver, buf);
+        buf.append(" AND ");
+        children.get(2).toSQL(resolver, buf);
+    }
 }
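
A quick shape check for the BETWEEN serializer, with made-up operands in place of
the three children (operand, lower bound, upper bound):

    public class BetweenSketch {
        public static void main(String[] args) {
            String operand = "A", lower = "1", upper = "10";
            boolean negate = true;
            String sql = operand + (negate ? " NOT" : "") + " BETWEEN " + lower + " AND " + upper;
            System.out.println(sql); // prints: A NOT BETWEEN 1 AND 10
        }
    }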

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java
index 5f649de..42e42bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/BindParseNode.java
@@ -19,6 +19,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -51,11 +53,6 @@ public class BindParseNode extends NamedParseNode {
         return true;
     }
     
-    @Override
-    public String toString() {
-        return ":" + index;
-    }
-
 	@Override
 	public int hashCode() {
 		final int prime = 31;
@@ -78,4 +75,9 @@ public class BindParseNode extends NamedParseNode {
 		return true;
 	}
 
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(':');
+        buf.append(index);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindTableNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindTableNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/BindTableNode.java
index 52a8948..3895dd1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/BindTableNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/BindTableNode.java
@@ -19,6 +19,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -39,5 +41,11 @@ public class BindTableNode extends ConcreteTableNode {
         return visitor.visit(this);
     }
 
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(this.getName().toString());
+        if (this.getAlias() != null) buf.append(" " + this.getAlias());
+        buf.append(' ');
+    }
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/CaseParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CaseParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CaseParseNode.java
index 111b9c6..9467e68 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/CaseParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CaseParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -44,4 +46,22 @@ public class CaseParseNode extends CompoundParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append("CASE ");
+        List<ParseNode> children = getChildren();
+        for (int i = 0; i < children.size() - 1; i+=2) {
+            buf.append("WHEN ");
+            children.get(i+1).toSQL(resolver, buf);
+            buf.append(" THEN ");
+            children.get(i).toSQL(resolver, buf);
+        }
+        if (children.size() % 2 != 0) { // has ELSE
+            buf.append(" ELSE ");
+            children.get(children.size()-1).toSQL(resolver, buf);
+        }
+        buf.append(" END ");
+    }
 }
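
The WHEN/THEN order above looks inverted at first glance; it implies the children
alternate [result, condition, result, condition, ...] with an optional trailing ELSE
result. A self-contained sketch under that assumption (a separating space is added
between pairs here for readability):

    import java.util.Arrays;
    import java.util.List;

    public class CaseToSqlSketch {
        public static void main(String[] args) {
            // child layout: [result, condition, result, condition, else]
            List<String> children = Arrays.asList("'low'", "A < 10", "'high'", "A >= 10", "'other'");
            StringBuilder buf = new StringBuilder("CASE ");
            for (int i = 0; i < children.size() - 1; i += 2) {
                buf.append("WHEN ").append(children.get(i + 1)); // condition
                buf.append(" THEN ").append(children.get(i));    // result
                buf.append(' ');
            }
            if (children.size() % 2 != 0) { // odd count means a trailing ELSE result
                buf.append("ELSE ").append(children.get(children.size() - 1));
            }
            buf.append(" END");
            // CASE WHEN A < 10 THEN 'low' WHEN A >= 10 THEN 'high' ELSE 'other' END
            System.out.println(buf);
        }
    }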

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java
index 598a190..78be616 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CastParseNode.java
@@ -21,16 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.phoenix.expression.Expression;
-import org.apache.phoenix.expression.function.RoundDecimalExpression;
-import org.apache.phoenix.expression.function.RoundTimestampExpression;
-import org.apache.phoenix.schema.types.PDate;
-import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PLong;
-import org.apache.phoenix.schema.types.PTimestamp;
-import org.apache.phoenix.schema.types.PUnsignedTimestamp;
-import org.apache.phoenix.schema.TypeMismatchException;
 import org.apache.phoenix.util.SchemaUtil;
 
 /**
@@ -42,7 +34,6 @@ import org.apache.phoenix.util.SchemaUtil;
  *
  */
 public class CastParseNode extends UnaryParseNode {
-	
 	private final PDataType dt;
     private final Integer maxLength;
     private final Integer scale;
@@ -83,28 +74,6 @@ public class CastParseNode extends UnaryParseNode {
         return scale;
     }
 
-    // TODO: don't repeat this ugly cast logic (maybe use isCastable in the last else block.
-    public static Expression convertToRoundExpressionIfNeeded(PDataType fromDataType, PDataType targetDataType, List<Expression> expressions) throws SQLException {
-	    Expression firstChildExpr = expressions.get(0);
-	    if(fromDataType == targetDataType) {
-	        return firstChildExpr;
-//        } else if((fromDataType == PDataType.DATE || fromDataType == PDataType.UNSIGNED_DATE) && targetDataType.isCoercibleTo(PDataType.LONG)) {
-//            return firstChildExpr;
-//        } else if(fromDataType.isCoercibleTo(PDataType.LONG) && (targetDataType == PDataType.DATE || targetDataType == PDataType.UNSIGNED_DATE)) {
-//            return firstChildExpr;
-	    } else if((fromDataType == PDecimal.INSTANCE || fromDataType == PTimestamp.INSTANCE || fromDataType == PUnsignedTimestamp.INSTANCE) && targetDataType.isCoercibleTo(
-          PLong.INSTANCE)) {
-	        return RoundDecimalExpression.create(expressions);
-	    } else if((fromDataType == PDecimal.INSTANCE || fromDataType == PTimestamp.INSTANCE || fromDataType == PUnsignedTimestamp.INSTANCE) && targetDataType.isCoercibleTo(
-          PDate.INSTANCE)) {
-	        return RoundTimestampExpression.create(expressions);
-	    } else if(fromDataType.isCastableTo(targetDataType)) {
-	        return firstChildExpr;
-        } else {
-            throw TypeMismatchException.newException(fromDataType, targetDataType, firstChildExpr.toString());
-	    }
-	}
-
 	@Override
 	public int hashCode() {
 		final int prime = 31;
@@ -142,4 +111,30 @@ public class CastParseNode extends UnaryParseNode {
 			return false;
 		return true;
 	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        buf.append(" CAST(");
+        children.get(0).toSQL(resolver, buf);
+        buf.append(" AS ");
+        boolean isArray = dt.isArrayType();
+        PDataType type = isArray ? PDataType.arrayBaseType(dt) : dt;
+        buf.append(type.getSqlTypeName());
+        if (maxLength != null) {
+            buf.append('(');
+            buf.append(maxLength);
+            if (scale != null) {
+              buf.append(',');
+              buf.append(scale); // has both max length and scale, e.g. DECIMAL(10,2)
+            }
+            buf.append(')');
+        }
+        if (isArray) {
+            buf.append(' ');
+            buf.append(PDataType.ARRAY_TYPE_SUFFIX);
+            buf.append(' ');
+        }
+        buf.append(')'); // close the CAST( opened above
+    }
 }
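
A hypothetical helper standing in for the Phoenix plumbing above; it only
demonstrates the rendered CAST shape (precision, scale, ARRAY suffix), not the
real PDataType API:

    public class CastToSqlSketch {
        static String renderCast(String operand, String sqlTypeName,
                                 Integer maxLength, Integer scale, boolean isArray) {
            StringBuilder buf = new StringBuilder(" CAST(");
            buf.append(operand).append(" AS ").append(sqlTypeName);
            if (maxLength != null) {
                buf.append('(').append(maxLength);
                if (scale != null) {
                    buf.append(',').append(scale); // e.g. DECIMAL(10,2)
                }
                buf.append(')');
            }
            if (isArray) {
                buf.append(" ARRAY "); // stands in for PDataType.ARRAY_TYPE_SUFFIX
            }
            return buf.append(')').toString();
        }

        public static void main(String[] args) {
            System.out.println(renderCast("V", "DECIMAL", 10, 2, false));     //  CAST(V AS DECIMAL(10,2))
            System.out.println(renderCast("V", "VARCHAR", null, null, true)); //  CAST(V AS VARCHAR ARRAY )
        }
    }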

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
index 8032ba5..cde3e9c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
@@ -21,13 +21,13 @@ import java.sql.SQLException;
 
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.types.PBinary;
 import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.schema.types.PDecimal;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDecimal;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.base.Preconditions;
@@ -189,4 +189,26 @@ public class ColumnDef {
 	public String getExpression() {
 		return expressionStr;
 	}
+	
+	@Override
+    public String toString() {
+	    StringBuilder buf = new StringBuilder(columnDefName.getColumnNode().toString());
+	    buf.append(' ');
+        buf.append(dataType.getSqlTypeName());
+        if (maxLength != null) {
+            buf.append('(');
+            buf.append(maxLength);
+            if (scale != null) {
+              buf.append(',');
+              buf.append(scale); // has both max length and scale, e.g. DECIMAL(10,2)
+            }
+            buf.append(')');
+        }
+        if (isArray) {
+            buf.append(' ');
+            buf.append(PDataType.ARRAY_TYPE_SUFFIX);
+            buf.append(' ');
+        }
+	    return buf.toString();
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
index e7489fd..80c5d0f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
@@ -19,7 +19,12 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.SchemaUtil;
 
 /**
  * Node representing a reference to a column in a SQL expression
@@ -69,11 +74,6 @@ public class ColumnParseNode extends NamedParseNode {
     }
 
     @Override
-    public String toString() {
-        return fullName;
-    }
-
-    @Override
     public int hashCode() {
         return fullName.hashCode();
     }
@@ -90,4 +90,41 @@ public class ColumnParseNode extends NamedParseNode {
     public boolean isTableNameCaseSensitive() {
         return tableName == null ? false : tableName.isTableNameCaseSensitive();
     }
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        // If resolver is not null, then resolve to get fully qualified name
+        String tableName = null;
+        if (resolver == null) {
+            if (this.tableName != null) {
+                tableName = this.tableName.getTableName();
+            }
+        } else {
+            try {
+                ColumnRef ref = resolver.resolveColumn(this.getSchemaName(), this.getTableName(), this.getName());
+                PColumn column = ref.getColumn();
+                if (!SchemaUtil.isPKColumn(column)) {
+                    PTable table = ref.getTable();
+                    String defaultFamilyName = table.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : table.getDefaultFamilyName().getString();
+                    // Translate to the data table column name
+                    String dataFamilyName = column.getFamilyName().getString() ;
+                    tableName = defaultFamilyName.equals(dataFamilyName) ? null : dataFamilyName;
+                }
+                
+            } catch (SQLException e) {
+                throw new RuntimeException(e); // Already resolved, so not possible
+            }
+        }
+        if (tableName != null) {
+            if (isTableNameCaseSensitive()) {
+                buf.append('"');
+                buf.append(tableName);
+                buf.append('"');
+            } else {
+                buf.append(tableName);
+            }
+            buf.append('.');
+        }
+        toSQL(buf);
+    }
 }
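
The resolver branch above qualifies a column only when its resolved family differs
from the table's default column family (PK columns stay unqualified). A sketch of
that rule with hypothetical names, using "0" as a stand-in default family:

    public class ColumnQualifierSketch {
        static String render(String defaultFamily, String resolvedFamily, String columnName) {
            // emit a family qualifier only for non-default families
            String family = defaultFamily.equals(resolvedFamily) ? null : resolvedFamily;
            return (family == null ? "" : family + ".") + columnName;
        }

        public static void main(String[] args) {
            System.out.println(render("0", "0", "NAME"));  // NAME
            System.out.println(render("0", "CF", "NAME")); // CF.NAME
        }
    }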

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java
index 6f8e372..51cb833 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java
@@ -22,6 +22,8 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.phoenix.compile.ColumnResolver;
+import org.apache.phoenix.util.QueryUtil;
 
 /**
  * 
@@ -54,4 +56,12 @@ public abstract class ComparisonParseNode extends BinaryParseNode {
      * Return the inverted operator for the CompareOp
      */
     public abstract CompareFilter.CompareOp getInvertFilterOp();
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        buf.append(" " + QueryUtil.toSQL(getFilterOp()) + " ");
+        children.get(1).toSQL(resolver, buf);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java
index e0ab22b..fd5d73e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java
@@ -68,11 +68,6 @@ public abstract class CompoundParseNode extends ParseNode {
     }
 
     @Override
-    public String toString() {
-        return this.getClass().getName() + children.toString();
-    }
-    
-    @Override
 	public int hashCode() {
 		final int prime = 31;
 		int result = 1;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java
index 05604d7..640ee7b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java
@@ -38,5 +38,24 @@ public abstract class ConcreteTableNode extends TableNode {
         return name;
     }
 
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((name == null) ? 0 : name.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        ConcreteTableNode other = (ConcreteTableNode)obj;
+        if (name == null) {
+            if (other.name != null) return false;
+        } else if (!name.equals(other.name)) return false;
+        return true;
+    }
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java
index b86c76d..d1ceb89 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.util.SchemaUtil;
 
 
@@ -48,4 +49,30 @@ public class DerivedTableNode extends TableNode {
         return visitor.visit(this);
     }
 
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append('(');
+        select.toSQL(resolver, buf);
+        buf.append(')');
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((select == null) ? 0 : select.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        DerivedTableNode other = (DerivedTableNode)obj;
+        if (select == null) {
+            if (other.select != null) return false;
+        } else if (!select.equals(other.select)) return false;
+        return true;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java
index 846da8a..8dc596c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.parse;
 import java.sql.SQLException;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.function.DistinctCountAggregateFunction;
@@ -41,4 +42,19 @@ public class DistinctCountParseNode extends DelegateConstantToCountParseNode {
             throws SQLException {
         return new DistinctCountAggregateFunction(children, getDelegateFunction(children, context));
     }
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(' ');
+        buf.append("COUNT(DISTINCT ");
+        List<ParseNode> children = getChildren();
+        if (!children.isEmpty()) {
+            for (ParseNode child : children) {
+                child.toSQL(resolver, buf);
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+        }
+        buf.append(')');
+    }
 }
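
The append-then-trim idiom used above, in isolation: each argument is followed by a
comma and setLength drops the final one. Hypothetical argument strings stand in for
child.toSQL(resolver, buf):

    import java.util.Arrays;
    import java.util.List;

    public class CountDistinctSketch {
        public static void main(String[] args) {
            List<String> children = Arrays.asList("A", "B");
            StringBuilder buf = new StringBuilder(" COUNT(DISTINCT ");
            if (!children.isEmpty()) {
                for (String child : children) {
                    buf.append(child).append(',');
                }
                buf.setLength(buf.length() - 1); // trim the trailing comma
            }
            buf.append(')');
            System.out.println(buf); // prints:  COUNT(DISTINCT A,B)
        }
    }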

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/DivideParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/DivideParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/DivideParseNode.java
index f3ed117..1a2e1f9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/DivideParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/DivideParseNode.java
@@ -31,6 +31,13 @@ import java.util.List;
  * @since 0.1
  */
 public class DivideParseNode extends ArithmeticParseNode {
+    public static final String OPERATOR = "/";
+
+    @Override
+    public String getOperator() {
+        return OPERATOR;
+    }
+    
 
     DivideParseNode(List<ParseNode> children) {
         super(children);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java
index fde7d76..95d5e20 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -72,4 +74,11 @@ public class ExistsParseNode extends UnaryParseNode {
 			return false;
 		return true;
 	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        if (negate) buf.append(" NOT");
+        buf.append(" EXISTS ");
+        getChildren().get(0).toSQL(resolver, buf);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java
index 2c939fc..80a08bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java
@@ -20,6 +20,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 /**
  * 
  * Node representing the selection of all columns of a family (cf.*) in the SELECT clause of SQL
@@ -71,5 +73,11 @@ public class FamilyWildcardParseNode extends NamedParseNode {
 			return false;
 		return true;
 	}
+	
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        toSQL(buf);
+        buf.append(".*");
+    }
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
index c41fa4f..9764f52 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
@@ -31,9 +31,7 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.http.annotation.Immutable;
-
-import com.google.common.collect.ImmutableSet;
-
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.expression.Determinism;
 import org.apache.phoenix.expression.Expression;
@@ -41,12 +39,14 @@ import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.function.AggregateFunction;
 import org.apache.phoenix.expression.function.FunctionExpression;
 import org.apache.phoenix.schema.ArgumentTypeMismatchException;
+import org.apache.phoenix.schema.ValueRangeExcpetion;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDataTypeFactory;
 import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.schema.ValueRangeExcpetion;
 import org.apache.phoenix.util.SchemaUtil;
 
+import com.google.common.collect.ImmutableSet;
+
 
 
 /**
@@ -83,18 +83,6 @@ public class FunctionParseNode extends CompoundParseNode {
         return visitor.visitLeave(this, l);
     }
 
-    @Override
-    public String toString() {
-        StringBuilder buf = new StringBuilder(name + "(");
-        for (ParseNode child : getChildren()) {
-            buf.append(child.toString());
-            buf.append(',');
-        }
-        buf.setLength(buf.length()-1);
-        buf.append(')');
-        return buf.toString();
-    }
-
     public boolean isAggregate() {
         return getInfo().isAggregate();
     }
@@ -459,4 +447,20 @@ public class FunctionParseNode extends CompoundParseNode {
 			return false;
 		return true;
 	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(' ');
+        buf.append(name);
+        buf.append('(');
+        List<ParseNode> children = getChildren();
+        if (!children.isEmpty()) {
+            for (ParseNode child : children) {
+                child.toSQL(resolver, buf);
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+        }
+        buf.append(')');
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
index 94f9bfb..ce20208 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
@@ -21,6 +21,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.StringUtil;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -195,4 +196,39 @@ public class HintNode {
     public boolean hasHint(Hint hint) {
         return hints.containsKey(hint);
     }
+    
+    @Override
+    public String toString() {
+        if (hints.isEmpty()) {
+            return StringUtil.EMPTY_STRING;
+        }
+        StringBuilder buf = new StringBuilder("/*+ ");
+        for (Map.Entry<Hint, String> entry : hints.entrySet()) {
+            buf.append(entry.getKey());
+            buf.append(entry.getValue());
+            buf.append(' ');
+        }
+        buf.append("*/ ");
+        return buf.toString();
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((hints == null) ? 0 : hints.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        HintNode other = (HintNode)obj;
+        if (hints == null) {
+            if (other.hints != null) return false;
+        } else if (!hints.equals(other.hints)) return false;
+        return true;
+    }
 }
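
The toString above concatenates each hint key directly with its stored value, which
suggests the value is stored with its own surrounding parentheses. A sketch under
that assumption, with a hypothetical hint map:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class HintToStringSketch {
        public static void main(String[] args) {
            Map<String, String> hints = new LinkedHashMap<>();
            hints.put("INDEX", "(t idx)"); // assumed stored form of INDEX(t idx)
            StringBuilder buf = new StringBuilder("/*+ ");
            for (Map.Entry<String, String> entry : hints.entrySet()) {
                buf.append(entry.getKey()).append(entry.getValue()).append(' ');
            }
            buf.append("*/ ");
            System.out.println(buf); // prints: /*+ INDEX(t idx) */
        }
    }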

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java
index fae15f5..b029076 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/InListParseNode.java
@@ -21,6 +21,7 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 
@@ -83,4 +84,22 @@ public class InListParseNode extends CompoundParseNode {
 			return false;
 		return true;
 	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        buf.append(' ');
+        if (negate) buf.append("NOT ");
+        buf.append("IN");
+        buf.append('(');
+        if (children.size() > 1) {
+            for (int i = 1; i < children.size(); i++) {
+                children.get(i).toSQL(resolver, buf);
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+        }
+        buf.append(')');
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java
index 84984e9..9003fc8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/InParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -81,4 +83,13 @@ public class InParseNode extends BinaryParseNode {
 			return false;
 		return true;
 	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        getChildren().get(0).toSQL(resolver, buf);
+        if (negate) buf.append(" NOT");
+        buf.append(" IN (");
+        getChildren().get(1).toSQL(resolver, buf);
+        buf.append(')');
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java
index 614cfd0..fafa9d1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -72,4 +74,12 @@ public class IsNullParseNode extends UnaryParseNode {
 			return false;
 		return true;
 	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        getChildren().get(0).toSQL(resolver, buf);
+        buf.append(" IS");
+        if (negate) buf.append(" NOT");
+        buf.append(" NULL ");
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/JoinTableNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/JoinTableNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/JoinTableNode.java
index 5dd13f0..d30e4ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/JoinTableNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/JoinTableNode.java
@@ -19,6 +19,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -78,5 +80,54 @@ public class JoinTableNode extends TableNode {
     public <T> T accept(TableNodeVisitor<T> visitor) throws SQLException {
         return visitor.visit(this);
     }
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(lhs);
+        buf.append(' ');
+        if (onNode == null) {
+            buf.append(',');
+            buf.append(rhs);
+        } else {
+            buf.append(type);
+            buf.append(" JOIN ");
+            buf.append(rhs);
+            buf.append(" ON (");
+            onNode.toSQL(resolver, buf);
+            buf.append(')');
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((lhs == null) ? 0 : lhs.hashCode());
+        result = prime * result + ((onNode == null) ? 0 : onNode.hashCode());
+        result = prime * result + ((rhs == null) ? 0 : rhs.hashCode());
+        result = prime * result + (singleValueOnly ? 1231 : 1237);
+        result = prime * result + ((type == null) ? 0 : type.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        JoinTableNode other = (JoinTableNode)obj;
+        if (lhs == null) {
+            if (other.lhs != null) return false;
+        } else if (!lhs.equals(other.lhs)) return false;
+        if (onNode == null) {
+            if (other.onNode != null) return false;
+        } else if (!onNode.equals(other.onNode)) return false;
+        if (rhs == null) {
+            if (other.rhs != null) return false;
+        } else if (!rhs.equals(other.rhs)) return false;
+        if (singleValueOnly != other.singleValueOnly) return false;
+        if (type != other.type) return false;
+        return true;
+    }
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java
index 41d252d..8a510d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/LikeParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -85,4 +87,14 @@ public class LikeParseNode extends BinaryParseNode {
 			return false;
 		return true;
 	}
+
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        if (negate) buf.append(" NOT");
+        buf.append(" " + (likeType == LikeType.CASE_SENSITIVE ? "LIKE" : "ILIKE") + " ");
+        children.get(1).toSQL(resolver, buf);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/LimitNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/LimitNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/LimitNode.java
index 1cb77e8..135cf54 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/LimitNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/LimitNode.java
@@ -35,4 +35,33 @@ public class LimitNode {
     public ParseNode getLimitParseNode() {
         return bindNode == null ? limitNode : bindNode;
     }
+    
+    @Override
+    public String toString() {
+        return bindNode == null ? limitNode.toString() : bindNode.toString();
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((bindNode == null) ? 0 : bindNode.hashCode());
+        result = prime * result + ((limitNode == null) ? 0 : limitNode.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        LimitNode other = (LimitNode)obj;
+        if (bindNode == null) {
+            if (other.bindNode != null) return false;
+        } else if (!bindNode.equals(other.bindNode)) return false;
+        if (limitNode == null) {
+            if (other.limitNode != null) return false;
+        } else if (!limitNode.equals(other.limitNode)) return false;
+        return true;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
index 9e9184f..e0e8c3b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
@@ -21,8 +21,9 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.schema.types.PTimestamp;
 
 /**
  * 
@@ -36,6 +37,8 @@ public class LiteralParseNode extends TerminalParseNode {
     public static final ParseNode NULL = new LiteralParseNode(null);
     public static final ParseNode ZERO = new LiteralParseNode(0);
     public static final ParseNode ONE = new LiteralParseNode(1);
+    public static final ParseNode MINUS_ONE = new LiteralParseNode(-1L);
+    public static final ParseNode TRUE = new LiteralParseNode(true);
     
     private final Object value;
     private final PDataType type;
@@ -76,11 +79,6 @@ public class LiteralParseNode extends TerminalParseNode {
         return type == null ? null : type.toBytes(value);
     }
     
-    @Override
-    public String toString() {
-        return type == PVarchar.INSTANCE ? ("'" + value.toString() + "'") : value == null ? "null" : value.toString();
-    }
-
 	@Override
 	public int hashCode() {
 		final int prime = 31;
@@ -99,6 +97,24 @@ public class LiteralParseNode extends TerminalParseNode {
 		if (getClass() != obj.getClass())
 			return false;
 		LiteralParseNode other = (LiteralParseNode) obj;
+		if (value == other.value) return true;
+		if (type == null) return false;
 		return type.isComparableTo(other.type) && type.compareTo(value, other.value, other.type) == 0;
 	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        if (value == null) {
+            buf.append(" null ");
+        } else {
+            // TODO: move into PDataType?
+            if (type.isCoercibleTo(PTimestamp.INSTANCE)) {
+                buf.append(type);
+                buf.append(' ');
+                buf.append(type.toStringLiteral(value, null));
+            } else {
+                buf.append(type.toStringLiteral(value, null));
+            }
+        }
+    }
 }
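
A sketch of the literal forms this emits: temporal literals get a type-name prefix so
the text re-parses to the same type, while other literals print bare. The values are
made up and toStringLiteral's quoting is approximated by hand:

    public class LiteralToSqlSketch {
        static String render(String typeName, boolean isTemporal, String literal) {
            if (literal == null) return " null ";
            return isTemporal ? typeName + " " + literal : literal;
        }

        public static void main(String[] args) {
            System.out.println(render("DATE", true, "'2015-03-04 23:40:36.000'")); // DATE '2015-03-04 23:40:36.000'
            System.out.println(render("VARCHAR", false, "'abc'"));                 // 'abc'
            System.out.println(render(null, false, null));                         //  null
        }
    }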

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java
index 553e13f..15d539d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java
@@ -31,6 +31,12 @@ import java.util.List;
  * @since 0.1
  */
 public class ModulusParseNode extends ArithmeticParseNode {
+    public static final String OPERATOR = "%";
+
+    @Override
+    public String getOperator() {
+        return OPERATOR;
+    }
 
     ModulusParseNode(List<ParseNode> children) {
         super(children);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java
index 17016a4..1fc5436 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java
@@ -31,6 +31,12 @@ import java.util.List;
  * @since 0.1
  */
 public class MultiplyParseNode extends ArithmeticParseNode {
+    public static final String OPERATOR = "*";
+
+    @Override
+    public String getOperator() {
+        return OPERATOR;
+    }
 
     MultiplyParseNode(List<ParseNode> children) {
         super(children);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java
index 6cfeb60..3f1becc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedNode.java
@@ -59,5 +59,9 @@ public class NamedNode {
         NamedNode other = (NamedNode)obj;
         return name.equals(other.name);
     }
-
+    
+    @Override
+    public String toString() {
+        return (isCaseSensitive ? "\"" : "") + name + (isCaseSensitive ? "\"" : "");
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java
index 51da80a..32dfc49 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedParseNode.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.parse;
 
 
+
 /**
  * 
  * Abstract node representing named nodes such as binds and column expressions in SQL
@@ -48,11 +49,6 @@ public abstract class NamedParseNode extends TerminalParseNode{
         return namedNode.isCaseSensitive();
     }
     
-    @Override
-    public String toString() {
-        return getName();
-    }
-
 	@Override
 	public int hashCode() {
 		final int prime = 31;
@@ -78,4 +74,15 @@ public abstract class NamedParseNode extends TerminalParseNode{
 			return false;
 		return true;
 	}
+
+    
+    public void toSQL(StringBuilder buf) {
+        if (isCaseSensitive()) {
+            buf.append('"');
+            buf.append(getName());
+            buf.append('"');
+        } else {
+            buf.append(getName());
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedTableNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedTableNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedTableNode.java
index 9379919..4e0906f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedTableNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/NamedTableNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 import com.google.common.collect.ImmutableList;
 
 /**
@@ -59,5 +61,41 @@ public class NamedTableNode extends ConcreteTableNode {
     public List<ColumnDef> getDynamicColumns() {
         return dynColumns;
     }
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(this.getName().toString());
+        if (!dynColumns.isEmpty()) {
+            buf.append('(');
+            for (ColumnDef def : dynColumns) {
+                buf.append(def);
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+            buf.append(')');
+        }
+        if (this.getAlias() != null) buf.append(" " + this.getAlias());
+        buf.append(' ');
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = super.hashCode();
+        result = prime * result + ((dynColumns == null) ? 0 : dynColumns.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (!super.equals(obj)) return false;
+        if (getClass() != obj.getClass()) return false;
+        NamedTableNode other = (NamedTableNode)obj;
+        if (dynColumns == null) {
+            if (other.dynColumns != null) return false;
+        } else if (!dynColumns.equals(other.dynColumns)) return false;
+        return true;
+    }
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/NotParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/NotParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/NotParseNode.java
index 9d87404..86ca1cf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/NotParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/NotParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -44,4 +46,11 @@ public class NotParseNode extends UnaryParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        buf.append(" NOT ");
+        children.get(0).toSQL(resolver, buf);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/OrParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/OrParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/OrParseNode.java
index 6531533..2a38819 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/OrParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/OrParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -31,6 +33,7 @@ import java.util.List;
  * @since 0.1
  */
 public class OrParseNode extends CompoundParseNode {
+    public static final String NAME = "OR";
 
     OrParseNode(List<ParseNode> children) {
         super(children);
@@ -44,4 +47,16 @@ public class OrParseNode extends CompoundParseNode {
         }
         return visitor.visitLeave(this, l);
     }
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append('(');
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        for (int i = 1 ; i < children.size(); i++) {
+            buf.append(" " + NAME + " ");
+            children.get(i).toSQL(resolver, buf);
+        }
+        buf.append(')');
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/OrderByNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/OrderByNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/OrderByNode.java
index bc24687..2451c4b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/OrderByNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/OrderByNode.java
@@ -17,6 +17,8 @@
  */
 package org.apache.phoenix.parse;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 /**
  * 
@@ -47,9 +49,39 @@ public final class OrderByNode {
     public ParseNode getNode() {
         return child;
     }
-    
+ 
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((child == null) ? 0 : child.hashCode());
+        result = prime * result + (nullsLast ? 1231 : 1237);
+        result = prime * result + (orderAscending ? 1231 : 1237);
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        OrderByNode other = (OrderByNode)obj;
+        if (child == null) {
+            if (other.child != null) return false;
+        } else if (!child.equals(other.child)) return false;
+        if (nullsLast != other.nullsLast) return false;
+        if (orderAscending != other.orderAscending) return false;
+        return true;
+    }
+
     @Override
     public String toString() {
         return child.toString() + (orderAscending ? " asc" : " desc") + " nulls " + (nullsLast ? "last" : "first");
     }
+
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        child.toSQL(resolver, buf);
+        if (!orderAscending) buf.append(" DESC");
+        if (nullsLast) buf.append(" NULLS LAST ");
+    }
 }
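
The ORDER BY serializer emits only non-default modifiers, treating ascending order
and NULLS FIRST as the defaults the code implies. A sketch with a hypothetical
child expression:

    public class OrderBySketch {
        public static void main(String[] args) {
            String child = "A";
            boolean orderAscending = false, nullsLast = true;
            StringBuilder buf = new StringBuilder(child);
            if (!orderAscending) buf.append(" DESC");
            if (nullsLast) buf.append(" NULLS LAST ");
            System.out.println(buf); // prints: A DESC NULLS LAST
        }
    }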


[19/50] [abbrv] phoenix git commit: PHOENIX-1641 Addendum to the previous commit

Posted by ma...@apache.org.
PHOENIX-1641 Addendum to the previous commit


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/78d91d11
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/78d91d11
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/78d91d11

Branch: refs/heads/calcite
Commit: 78d91d11e52acf54486c94ad8a0ec7dda2c9eb21
Parents: 2b8e663
Author: Samarth <sa...@salesforce.com>
Authored: Sat Feb 7 10:42:44 2015 -0800
Committer: Samarth <sa...@salesforce.com>
Committed: Sat Feb 7 10:42:44 2015 -0800

----------------------------------------------------------------------
 .../phoenix/coprocessor/MetaDataProtocol.java   | 10 ++--
 .../query/ConnectionQueryServicesImpl.java      | 49 +++++++++++---------
 .../org/apache/phoenix/util/UpgradeUtil.java    | 12 +++--
 3 files changed, 42 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/78d91d11/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 357ebdd..be5fb4d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -60,13 +60,17 @@ public abstract class MetaDataProtocol extends MetaDataService {
 
     public static final long MIN_TABLE_TIMESTAMP = 0;
 
-    // Incremented from 3 to 4 to salt the sequence table in 3.2/4.2
-    // Incremented from 5 to 6 with the addition of the STORE_NULLS table option
+    // Incremented from 5 to 6 with the addition of the STORE_NULLS table option in 4.3
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_TABLE_TIMESTAMP + 6;
     public static final int DEFAULT_MAX_META_DATA_VERSIONS = 1000;
     public static final int DEFAULT_MAX_STAT_DATA_VERSIONS = 3;
     public static final boolean DEFAULT_META_DATA_KEEP_DELETED_CELLS = true;
-
+    
+    // Min system table timestamps for every release.
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 = MIN_TABLE_TIMESTAMP + 3;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0 = MIN_TABLE_TIMESTAMP + 4;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1 = MIN_TABLE_TIMESTAMP + 5;
+    
     // TODO: pare this down to minimum, as we don't need duplicates for both table and column errors, nor should we need
     // a different code for every type of error.
     // ENTITY_ALREADY_EXISTS, ENTITY_NOT_FOUND, NEWER_ENTITY_FOUND, ENTITY_NOT_IN_REGION, CONCURRENT_MODIFICATION

http://git-wip-us.apache.org/repos/asf/phoenix/blob/78d91d11/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 4a9eac0..2b508b5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1847,20 +1847,20 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 // This will occur if we have an older SYSTEM.CATALOG and we need to update it to include
                                 // any new columns we've added.
                                 long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
-                                
+
                                 // We know that we always need to add the STORE_NULLS column for 4.3 release
                                 String columnsToAdd = PhoenixDatabaseMetaData.STORE_NULLS + " " + PBoolean.INSTANCE.getSqlTypeName();
-                                
-                                // If the server side schema is 4 versions behind then we need to add INDEX_TYPE
-                                // and INDEX_DISABLE_TIMESTAMP columns too.
+
+                                // If the server side schema is before MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 then
+                                // we need to add INDEX_TYPE and INDEX_DISABLE_TIMESTAMP columns too.
                                 // TODO: Once https://issues.apache.org/jira/browse/PHOENIX-1614 is fixed,
                                 // we should just have a ALTER TABLE ADD IF NOT EXISTS statement with all
                                 // the column names that have been added to SYSTEM.CATALOG since 4.0.
-                                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP - 4) {
+                                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
                                     columnsToAdd += ", " + PhoenixDatabaseMetaData.INDEX_TYPE + " " + PUnsignedTinyint.INSTANCE.getSqlTypeName()
                                             + ", " + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName();
                                 }
-                                
+
                                 // Ugh..need to assign to another local variable to keep eclipse happy.
                                 PhoenixConnection newMetaConnection = addColumnsIfNotExists(metaConnection,
                                         PhoenixDatabaseMetaData.SYSTEM_CATALOG,
@@ -1876,14 +1876,23 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             } catch (NewerTableAlreadyExistsException e) {
                                 // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed timestamp.
                                 // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
-                                Integer sequenceSaltBuckets = e.getTable().getBucketNum();
-                                nSequenceSaltBuckets = sequenceSaltBuckets == null ? 0 : sequenceSaltBuckets;
+                                nSequenceSaltBuckets = getSaltBuckets(e);
                             } catch (TableAlreadyExistsException e) {
                                 // This will occur if we have an older SYSTEM.SEQUENCE and we need to update it to include
                                 // any new columns we've added.
                                 long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
-                                // if the table is at a timestamp corresponding to before 4.2.1 then run the upgrade script
-                                if (currentServerSideTableTimeStamp <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP - 2) {
+                                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
+                                    // If the table time stamp is before 4.1.0 then we need to add the columns below
+                                    // to the SYSTEM.SEQUENCE table.
+                                    String columnsToAdd = PhoenixDatabaseMetaData.MIN_VALUE + " " + PLong.INSTANCE.getSqlTypeName() 
+                                            + ", " + PhoenixDatabaseMetaData.MAX_VALUE + " " + PLong.INSTANCE.getSqlTypeName()
+                                            + ", " + PhoenixDatabaseMetaData.CYCLE_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName()
+                                            + ", " + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName();
+                                    addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
+                                }
+                                // If the table timestamp is before 4.2.1 then run the upgrade script
+                                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1) {
                                     if (UpgradeUtil.upgradeSequenceTable(metaConnection, nSaltBuckets, e.getTable())) {
                                         metaConnection.removeTable(null,
                                                 PhoenixDatabaseMetaData.SEQUENCE_SCHEMA_NAME,
@@ -1896,16 +1905,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                         clearTableRegionCache(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
                                     }
                                     nSequenceSaltBuckets = nSaltBuckets;
-                                } 
-                                if (currentServerSideTableTimeStamp <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP - 3) {
-                                    // If the table time stamp is before 4.1.0 then we need to add below columns
-                                    // to the SYSTEM.SEQUENCE table.
-                                    String columnsToAdd = PhoenixDatabaseMetaData.MIN_VALUE + " " + PLong.INSTANCE.getSqlTypeName() 
-                                            + ", " + PhoenixDatabaseMetaData.MAX_VALUE + " " + PLong.INSTANCE.getSqlTypeName()
-                                            + ", " + PhoenixDatabaseMetaData.CYCLE_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName()
-                                            + ", " + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName();
-                                    addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
+                                } else { 
+                                    nSequenceSaltBuckets = getSaltBuckets(e);
                                 }
                             }
                             try {
@@ -1955,7 +1956,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             throw Throwables.propagate(e);
         }
     }
-
+    
+    private static int getSaltBuckets(TableAlreadyExistsException e) {
+        PTable table = e.getTable();
+        Integer sequenceSaltBuckets = table == null ? null : table.getBucketNum();
+        return sequenceSaltBuckets == null ? 0 : sequenceSaltBuckets;
+    }
+    
     @Override
     public MutationState updateData(MutationPlan plan) throws SQLException {
         return plan.execute();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/78d91d11/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index a92223b..86da5cc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -220,14 +220,15 @@ public class UpgradeUtil {
                     preSplitSequenceTable(conn, nSaltBuckets);
                     return true;
                 }
-                // We can detect upgrade from 4.2.0 -> 4.2.1 based on the timestamp of the table row
-                if (oldTable.getTimeStamp() == MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP-1) {
+                // If upgrading from 4.2.0, then we need this special case of pre-splitting the table.
+                // This is needed as a fix for https://issues.apache.org/jira/browse/PHOENIX-1401 
+                if (oldTable.getTimeStamp() == MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0) {
                     byte[] oldSeqNum = PLong.INSTANCE.toBytes(oldTable.getSequenceNumber());
                     KeyValue seqNumKV = KeyValueUtil.newKeyValue(seqTableKey, 
                             PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                             PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES,
                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
-                            PLong.INSTANCE.toBytes(oldTable.getSequenceNumber()+1));
+                            PLong.INSTANCE.toBytes(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
                     Put seqNumPut = new Put(seqTableKey);
                     seqNumPut.add(seqNumKV);
                     // Increment TABLE_SEQ_NUM in checkAndPut as semaphore so that only single client
@@ -243,8 +244,9 @@ public class UpgradeUtil {
                 return false;
             }
             
-            // if the SYSTEM.SEQUENCE table is for 4.1.0 or before then we need to salt the table
-            if (oldTable.getTimeStamp() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP - 3) {
+            // if the SYSTEM.SEQUENCE table is at 4.1.0 or before then we need to salt the table
+            // and pre-split it.
+            if (oldTable.getTimeStamp() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
                 int batchSizeBytes = 100 * 1024; // 100K chunks
                 int sizeBytes = 0;
                 List<Mutation> mutations =  Lists.newArrayListWithExpectedSize(10000);


[10/50] [abbrv] phoenix git commit: PHOENIX-514 Support functional indexes (Thomas D'Silva)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
index 2ea42ce..c8cf28e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
@@ -22,7 +22,6 @@ import java.util.List;
 
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PTable;
@@ -56,35 +55,37 @@ public class PostIndexDDLCompiler {
         //   that would allow the user to easily monitor the process of index creation.
         StringBuilder indexColumns = new StringBuilder();
         StringBuilder dataColumns = new StringBuilder();
-        List<PColumn> dataPKColumns = dataTableRef.getTable().getPKColumns();
-        PTable dataTable = dataTableRef.getTable();
-        int nPKColumns = dataPKColumns.size();
-        boolean isSalted = dataTable.getBucketNum() != null;
-        boolean isMultiTenant = connection.getTenantId() != null && dataTable.isMultiTenant();
-        int posOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0);
-        for (int i = posOffset; i < nPKColumns; i++) {
-            PColumn col = dataPKColumns.get(i);
-            if (col.getViewConstant() == null) {
-                String indexColName = IndexUtil.getIndexColumnName(col);
-                dataColumns.append('"').append(col.getName()).append("\",");
-                indexColumns.append('"').append(indexColName).append("\",");
-            }
+        
+        // Add the pk index columns
+        List<PColumn> indexPKColumns = indexTable.getPKColumns();
+        int nIndexPKColumns = indexTable.getPKColumns().size();
+        boolean isSalted = indexTable.getBucketNum() != null;
+        boolean isMultiTenant = connection.getTenantId() != null && indexTable.isMultiTenant();
+        boolean isViewIndex = indexTable.getViewIndexId()!=null;
+        int posOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isViewIndex ? 1 : 0);
+        for (int i = posOffset; i < nIndexPKColumns; i++) {
+            PColumn col = indexPKColumns.get(i);
+            String indexColName = col.getName().getString();
+            dataColumns.append(col.getExpressionStr()).append(",");
+            indexColumns.append('"').append(indexColName).append("\",");
         }
-        for (PColumnFamily family : dataTableRef.getTable().getColumnFamilies()) {
+        
+        // Add the covered columns
+        for (PColumnFamily family : indexTable.getColumnFamilies()) {
             for (PColumn col : family.getColumns()) {
                 if (col.getViewConstant() == null) {
-                    String indexColName = IndexUtil.getIndexColumnName(col);
-                    try {
-                        indexTable.getColumn(indexColName);
-                        dataColumns.append('"').append(col.getFamilyName()).append("\".");
-                        dataColumns.append('"').append(col.getName()).append("\",");
-                        indexColumns.append('"').append(indexColName).append("\",");
-                    } catch (ColumnNotFoundException e) {
-                        // Catch and ignore - means that this data column is not in the index
+                    String indexColName = col.getName().getString();
+                    String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColName);
+                    String dataColumnName = IndexUtil.getDataColumnName(indexColName);
+                    if (!dataFamilyName.equals("")) {
+                        dataColumns.append('"').append(dataFamilyName).append("\".");
                     }
+                    dataColumns.append('"').append(dataColumnName).append("\",");
+                    indexColumns.append('"').append(indexColName).append("\",");
                 }
             }
         }
+
         dataColumns.setLength(dataColumns.length()-1);
         indexColumns.setLength(indexColumns.length()-1);
         String schemaName = dataTableRef.getTable().getSchemaName().getString();
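
To make the rewritten loops concrete, here is the kind of population statement they assemble for a hypothetical functional index (all table, index, and column names are invented, and the exact statement shape Phoenix emits may differ):

// For: CREATE INDEX MY_IDX ON MY_SCHEMA.MY_TABLE (UPPER(NAME)) INCLUDE (CITY)
// dataColumns is built from each index PK column's stored expression plus the
// covered columns' data-side names; indexColumns uses the index's own names.
String dataColumns  = "UPPER(NAME),\"0\".\"CITY\"";
String indexColumns = "\"UPPER(NAME)\",\"0:CITY\"";
String upsert = "UPSERT INTO MY_SCHEMA.MY_IDX(" + indexColumns + ") "
        + "SELECT " + dataColumns + " FROM MY_SCHEMA.MY_TABLE";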

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index d534d50..2ac075e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -603,7 +603,7 @@ public class UpsertCompiler {
                         @Override
                         public MutationState execute() throws SQLException {
                             ImmutableBytesWritable ptr = context.getTempPtr();
-                            tableRef.getTable().getIndexMaintainers(ptr);
+                            tableRef.getTable().getIndexMaintainers(ptr, context.getConnection());
                             ServerCache cache = null;
                             try {
                                 if (ptr.getLength() > 0) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
index 1360178..406b567 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
@@ -49,12 +49,12 @@ import org.apache.phoenix.parse.SubqueryParseNode;
 import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.ColumnRef;
-import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -173,7 +173,7 @@ public class WhereCompiler {
                 context.addWhereCoditionColumn(ref.getColumn().getFamilyName().getBytes(), ref.getColumn().getName()
                         .getBytes());
             }
-            return ref.newColumnExpression();
+            return ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive());
         }
 
         @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 9d055c3..ce81e1f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.coprocessor;
 
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME_INDEX;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES;
@@ -60,7 +61,9 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Coprocessor;
@@ -105,25 +108,23 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
+import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
+import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.metrics.Metrics;
 import org.apache.phoenix.protobuf.ProtobufUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.AmbiguousColumnException;
-import org.apache.phoenix.schema.types.PBinary;
 import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PColumnImpl;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
@@ -132,18 +133,23 @@ import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTable.ViewType;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableType;
-import org.apache.phoenix.schema.types.PVarbinary;
-import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.stats.PTableStats;
 import org.apache.phoenix.schema.stats.StatisticsUtil;
 import org.apache.phoenix.schema.tuple.ResultTuple;
+import org.apache.phoenix.schema.types.PBinary;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.slf4j.Logger;
@@ -242,6 +248,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private static final KeyValue ARRAY_SIZE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES);
     private static final KeyValue VIEW_CONSTANT_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES);
     private static final KeyValue IS_VIEW_REFERENCED_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES);
+    private static final KeyValue COLUMN_DEF_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES);
     private static final List<KeyValue> COLUMN_KV_COLUMNS = Arrays.<KeyValue>asList(
             DECIMAL_DIGITS_KV,
             COLUMN_SIZE_KV,
@@ -252,7 +259,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             DATA_TABLE_NAME_KV, // included in both column and table row for metadata APIs
             ARRAY_SIZE_KV,
             VIEW_CONSTANT_KV,
-            IS_VIEW_REFERENCED_KV
+            IS_VIEW_REFERENCED_KV,
+            COLUMN_DEF_KV
             );
     static {
         Collections.sort(COLUMN_KV_COLUMNS, KeyValue.COMPARATOR);
@@ -266,7 +274,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private static final int ARRAY_SIZE_INDEX = COLUMN_KV_COLUMNS.indexOf(ARRAY_SIZE_KV);
     private static final int VIEW_CONSTANT_INDEX = COLUMN_KV_COLUMNS.indexOf(VIEW_CONSTANT_KV);
     private static final int IS_VIEW_REFERENCED_INDEX = COLUMN_KV_COLUMNS.indexOf(IS_VIEW_REFERENCED_KV);
-
+    private static final int COLUMN_DEF_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_DEF_KV);
+    
     private static final int LINK_TYPE_INDEX = 0;
 
     private static PName newPName(byte[] keyBuffer, int keyOffset, int keyLength) {
@@ -460,7 +469,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         byte[] viewConstant = viewConstantKv == null ? null : viewConstantKv.getValue();
         Cell isViewReferencedKv = colKeyValues[IS_VIEW_REFERENCED_INDEX];
         boolean isViewReferenced = isViewReferencedKv != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isViewReferencedKv.getValueArray(), isViewReferencedKv.getValueOffset(), isViewReferencedKv.getValueLength()));
-        PColumn column = new PColumnImpl(colName, famName, dataType, maxLength, scale, isNullable, position-1, sortOrder, arraySize, viewConstant, isViewReferenced);
+        Cell columnDefKv = colKeyValues[COLUMN_DEF_INDEX];
+        String expressionStr = columnDefKv==null ? null : (String)PVarchar.INSTANCE.toObject(columnDefKv.getValueArray(), columnDefKv.getValueOffset(), columnDefKv.getValueLength());
+        PColumn column = new PColumnImpl(colName, famName, dataType, maxLength, scale, isNullable, position-1, sortOrder, arraySize, viewConstant, isViewReferenced, expressionStr);
         columns.add(column);
     }
 
@@ -1399,15 +1410,17 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                                     // column, get lock and drop the index. If found as covered
                                     // column, delete from index (do this client side?).
                                     // In either case, invalidate index if the column is in it
+                                    PhoenixConnection connection = QueryUtil.getConnection(env.getConfiguration()).unwrap(PhoenixConnection.class);
                                     for (PTable index : table.getIndexes()) {
                                         try {
-                                            String indexColumnName = IndexUtil.getIndexColumnName(columnToDelete);
-                                            PColumn indexColumn = index.getColumn(indexColumnName);
+                                            IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
+                                            // get the columns required to create the index 
+                                            Set<ColumnReference> indexColumns = indexMaintainer.getIndexedColumns();
                                             byte[] indexKey =
                                                     SchemaUtil.getTableKey(tenantId, index
                                                             .getSchemaName().getBytes(), index.getTableName().getBytes());
-                                            // If index contains the column in it's PK, then drop it
-                                            if (SchemaUtil.isPKColumn(indexColumn)) {
+                                            // If index requires this column, then drop it
+                                            if (indexColumns.contains(new ColumnReference(columnToDelete.getFamilyName().getBytes(), columnToDelete.getName().getBytes()))) {
                                                 // Since we're dropping the index, lock it to ensure
                                                 // that a change in index state doesn't
                                                 // occur while we're dropping it.
@@ -1439,6 +1452,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                                     return new MetaDataMutationResult(
                                             MutationCode.COLUMN_NOT_FOUND, EnvironmentEdgeManager
                                                     .currentTimeMillis(), table, columnToDelete);
+                                } catch (ClassNotFoundException e1) {
                                 }
                             }
                         }
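
A short standalone sketch of the dependency test this hunk switches to (variable names as in the hunk): the index is dropped when the maintainer's indexed-column set, which now also includes columns referenced only inside index expressions, contains the column being deleted. ColumnReference compares by (family, qualifier), so contains() is a byte-wise match on the column's coordinates:

ColumnReference ref = new ColumnReference(
        columnToDelete.getFamilyName().getBytes(),
        columnToDelete.getName().getBytes());
boolean indexMustBeDropped = indexMaintainer.getIndexedColumns().contains(ref);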

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 5bbd5d3..a3b2faa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -66,6 +66,7 @@ import org.apache.phoenix.expression.aggregator.ServerAggregators;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.index.PhoenixIndexCodec;
@@ -270,7 +271,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                             for (IndexMaintainer maintainer : indexMaintainers) {
                                 if (!results.isEmpty()) {
                                     result.getKey(ptr);
-                                    ValueGetter valueGetter = maintainer.createGetterFromKeyValues(results);
+                                    ValueGetter valueGetter = maintainer.createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr),results);
                                     Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, c.getEnvironment().getRegion().getStartKey(), c.getEnvironment().getRegion().getEndKey());
                                     indexMutations.add(put);
                                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
index 3ff3dd6..7d389ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
@@ -234,6 +234,21 @@ public final class PTableProtos {
      * <code>optional bool viewReferenced = 11;</code>
      */
     boolean getViewReferenced();
+
+    // optional string expression = 12;
+    /**
+     * <code>optional string expression = 12;</code>
+     */
+    boolean hasExpression();
+    /**
+     * <code>optional string expression = 12;</code>
+     */
+    java.lang.String getExpression();
+    /**
+     * <code>optional string expression = 12;</code>
+     */
+    com.google.protobuf.ByteString
+        getExpressionBytes();
   }
   /**
    * Protobuf type {@code PColumn}
@@ -341,6 +356,11 @@ public final class PTableProtos {
               viewReferenced_ = input.readBool();
               break;
             }
+            case 98: {
+              bitField0_ |= 0x00000800;
+              expression_ = input.readBytes();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -584,6 +604,49 @@ public final class PTableProtos {
       return viewReferenced_;
     }
 
+    // optional string expression = 12;
+    public static final int EXPRESSION_FIELD_NUMBER = 12;
+    private java.lang.Object expression_;
+    /**
+     * <code>optional string expression = 12;</code>
+     */
+    public boolean hasExpression() {
+      return ((bitField0_ & 0x00000800) == 0x00000800);
+    }
+    /**
+     * <code>optional string expression = 12;</code>
+     */
+    public java.lang.String getExpression() {
+      java.lang.Object ref = expression_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          expression_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string expression = 12;</code>
+     */
+    public com.google.protobuf.ByteString
+        getExpressionBytes() {
+      java.lang.Object ref = expression_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        expression_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
     private void initFields() {
       columnNameBytes_ = com.google.protobuf.ByteString.EMPTY;
       familyNameBytes_ = com.google.protobuf.ByteString.EMPTY;
@@ -596,6 +659,7 @@ public final class PTableProtos {
       arraySize_ = 0;
       viewConstant_ = com.google.protobuf.ByteString.EMPTY;
       viewReferenced_ = false;
+      expression_ = "";
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -662,6 +726,9 @@ public final class PTableProtos {
       if (((bitField0_ & 0x00000400) == 0x00000400)) {
         output.writeBool(11, viewReferenced_);
       }
+      if (((bitField0_ & 0x00000800) == 0x00000800)) {
+        output.writeBytes(12, getExpressionBytes());
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -715,6 +782,10 @@ public final class PTableProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeBoolSize(11, viewReferenced_);
       }
+      if (((bitField0_ & 0x00000800) == 0x00000800)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(12, getExpressionBytes());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -793,6 +864,11 @@ public final class PTableProtos {
         result = result && (getViewReferenced()
             == other.getViewReferenced());
       }
+      result = result && (hasExpression() == other.hasExpression());
+      if (hasExpression()) {
+        result = result && getExpression()
+            .equals(other.getExpression());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -850,6 +926,10 @@ public final class PTableProtos {
         hash = (37 * hash) + VIEWREFERENCED_FIELD_NUMBER;
         hash = (53 * hash) + hashBoolean(getViewReferenced());
       }
+      if (hasExpression()) {
+        hash = (37 * hash) + EXPRESSION_FIELD_NUMBER;
+        hash = (53 * hash) + getExpression().hashCode();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -981,6 +1061,8 @@ public final class PTableProtos {
         bitField0_ = (bitField0_ & ~0x00000200);
         viewReferenced_ = false;
         bitField0_ = (bitField0_ & ~0x00000400);
+        expression_ = "";
+        bitField0_ = (bitField0_ & ~0x00000800);
         return this;
       }
 
@@ -1053,6 +1135,10 @@ public final class PTableProtos {
           to_bitField0_ |= 0x00000400;
         }
         result.viewReferenced_ = viewReferenced_;
+        if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+          to_bitField0_ |= 0x00000800;
+        }
+        result.expression_ = expression_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -1104,6 +1190,11 @@ public final class PTableProtos {
         if (other.hasViewReferenced()) {
           setViewReferenced(other.getViewReferenced());
         }
+        if (other.hasExpression()) {
+          bitField0_ |= 0x00000800;
+          expression_ = other.expression_;
+          onChanged();
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -1564,6 +1655,80 @@ public final class PTableProtos {
         return this;
       }
 
+      // optional string expression = 12;
+      private java.lang.Object expression_ = "";
+      /**
+       * <code>optional string expression = 12;</code>
+       */
+      public boolean hasExpression() {
+        return ((bitField0_ & 0x00000800) == 0x00000800);
+      }
+      /**
+       * <code>optional string expression = 12;</code>
+       */
+      public java.lang.String getExpression() {
+        java.lang.Object ref = expression_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          expression_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string expression = 12;</code>
+       */
+      public com.google.protobuf.ByteString
+          getExpressionBytes() {
+        java.lang.Object ref = expression_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          expression_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string expression = 12;</code>
+       */
+      public Builder setExpression(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000800;
+        expression_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string expression = 12;</code>
+       */
+      public Builder clearExpression() {
+        bitField0_ = (bitField0_ & ~0x00000800);
+        expression_ = getDefaultInstance().getExpression();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string expression = 12;</code>
+       */
+      public Builder setExpressionBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000800;
+        expression_ = value;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:PColumn)
     }
 
@@ -6294,35 +6459,35 @@ public final class PTableProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\014PTable.proto\032\021PGuidePosts.proto\"\347\001\n\007PC" +
+      "\n\014PTable.proto\032\021PGuidePosts.proto\"\373\001\n\007PC" +
       "olumn\022\027\n\017columnNameBytes\030\001 \002(\014\022\027\n\017family" +
       "NameBytes\030\002 \001(\014\022\020\n\010dataType\030\003 \002(\t\022\021\n\tmax" +
       "Length\030\004 \001(\005\022\r\n\005scale\030\005 \001(\005\022\020\n\010nullable\030" +
       "\006 \002(\010\022\020\n\010position\030\007 \002(\005\022\021\n\tsortOrder\030\010 \002" +
       "(\005\022\021\n\tarraySize\030\t \001(\005\022\024\n\014viewConstant\030\n " +
-      "\001(\014\022\026\n\016viewReferenced\030\013 \001(\010\"\232\001\n\013PTableSt" +
-      "ats\022\013\n\003key\030\001 \002(\014\022\016\n\006values\030\002 \003(\014\022\033\n\023guid" +
-      "ePostsByteCount\030\003 \001(\003\022\025\n\rkeyBytesCount\030\004" +
-      " \001(\003\022\027\n\017guidePostsCount\030\005 \001(\005\022!\n\013pGuideP",
-      "osts\030\006 \001(\0132\014.PGuidePosts\"\266\004\n\006PTable\022\027\n\017s" +
-      "chemaNameBytes\030\001 \002(\014\022\026\n\016tableNameBytes\030\002" +
-      " \002(\014\022\036\n\ttableType\030\003 \002(\0162\013.PTableType\022\022\n\n" +
-      "indexState\030\004 \001(\t\022\026\n\016sequenceNumber\030\005 \002(\003" +
-      "\022\021\n\ttimeStamp\030\006 \002(\003\022\023\n\013pkNameBytes\030\007 \001(\014" +
-      "\022\021\n\tbucketNum\030\010 \002(\005\022\031\n\007columns\030\t \003(\0132\010.P" +
-      "Column\022\030\n\007indexes\030\n \003(\0132\007.PTable\022\027\n\017isIm" +
-      "mutableRows\030\013 \002(\010\022 \n\nguidePosts\030\014 \003(\0132\014." +
-      "PTableStats\022\032\n\022dataTableNameBytes\030\r \001(\014\022" +
-      "\031\n\021defaultFamilyName\030\016 \001(\014\022\022\n\ndisableWAL",
-      "\030\017 \002(\010\022\023\n\013multiTenant\030\020 \002(\010\022\020\n\010viewType\030" +
-      "\021 \001(\014\022\025\n\rviewStatement\030\022 \001(\014\022\025\n\rphysical" +
-      "Names\030\023 \003(\014\022\020\n\010tenantId\030\024 \001(\014\022\023\n\013viewInd" +
-      "exId\030\025 \001(\005\022\021\n\tindexType\030\026 \001(\014\022\026\n\016statsTi" +
-      "meStamp\030\027 \001(\003\022\022\n\nstoreNulls\030\030 \001(\010*A\n\nPTa" +
-      "bleType\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022" +
-      "\t\n\005INDEX\020\003\022\010\n\004JOIN\020\004B@\n(org.apache.phoen" +
-      "ix.coprocessor.generatedB\014PTableProtosH\001" +
-      "\210\001\001\240\001\001"
+      "\001(\014\022\026\n\016viewReferenced\030\013 \001(\010\022\022\n\nexpressio" +
+      "n\030\014 \001(\t\"\232\001\n\013PTableStats\022\013\n\003key\030\001 \002(\014\022\016\n\006" +
+      "values\030\002 \003(\014\022\033\n\023guidePostsByteCount\030\003 \001(" +
+      "\003\022\025\n\rkeyBytesCount\030\004 \001(\003\022\027\n\017guidePostsCo",
+      "unt\030\005 \001(\005\022!\n\013pGuidePosts\030\006 \001(\0132\014.PGuideP" +
+      "osts\"\266\004\n\006PTable\022\027\n\017schemaNameBytes\030\001 \002(\014" +
+      "\022\026\n\016tableNameBytes\030\002 \002(\014\022\036\n\ttableType\030\003 " +
+      "\002(\0162\013.PTableType\022\022\n\nindexState\030\004 \001(\t\022\026\n\016" +
+      "sequenceNumber\030\005 \002(\003\022\021\n\ttimeStamp\030\006 \002(\003\022" +
+      "\023\n\013pkNameBytes\030\007 \001(\014\022\021\n\tbucketNum\030\010 \002(\005\022" +
+      "\031\n\007columns\030\t \003(\0132\010.PColumn\022\030\n\007indexes\030\n " +
+      "\003(\0132\007.PTable\022\027\n\017isImmutableRows\030\013 \002(\010\022 \n" +
+      "\nguidePosts\030\014 \003(\0132\014.PTableStats\022\032\n\022dataT" +
+      "ableNameBytes\030\r \001(\014\022\031\n\021defaultFamilyName",
+      "\030\016 \001(\014\022\022\n\ndisableWAL\030\017 \002(\010\022\023\n\013multiTenan" +
+      "t\030\020 \002(\010\022\020\n\010viewType\030\021 \001(\014\022\025\n\rviewStateme" +
+      "nt\030\022 \001(\014\022\025\n\rphysicalNames\030\023 \003(\014\022\020\n\010tenan" +
+      "tId\030\024 \001(\014\022\023\n\013viewIndexId\030\025 \001(\005\022\021\n\tindexT" +
+      "ype\030\026 \001(\014\022\026\n\016statsTimeStamp\030\027 \001(\003\022\022\n\nsto" +
+      "reNulls\030\030 \001(\010*A\n\nPTableType\022\n\n\006SYSTEM\020\000\022" +
+      "\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020\004" +
+      "B@\n(org.apache.phoenix.coprocessor.gener" +
+      "atedB\014PTableProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -6334,7 +6499,7 @@ public final class PTableProtos {
           internal_static_PColumn_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_PColumn_descriptor,
-              new java.lang.String[] { "ColumnNameBytes", "FamilyNameBytes", "DataType", "MaxLength", "Scale", "Nullable", "Position", "SortOrder", "ArraySize", "ViewConstant", "ViewReferenced", });
+              new java.lang.String[] { "ColumnNameBytes", "FamilyNameBytes", "DataType", "MaxLength", "Scale", "Nullable", "Position", "SortOrder", "ArraySize", "ViewConstant", "ViewReferenced", "Expression", });
           internal_static_PTableStats_descriptor =
             getDescriptor().getMessageTypes().get(1);
           internal_static_PTableStats_fieldAccessorTable = new
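
A small usage sketch of the regenerated builder (field values are illustrative; only the required PColumn fields plus the new one are set): the new optional field 12 carries the source expression text of a functional index column, so it survives the client/server round trip.

import com.google.protobuf.ByteString;
import org.apache.phoenix.coprocessor.generated.PTableProtos;

PTableProtos.PColumn col = PTableProtos.PColumn.newBuilder()
    .setColumnNameBytes(ByteString.copyFromUtf8("UPPER(NAME)")) // required
    .setDataType("VARCHAR")                                     // required
    .setNullable(true)                                          // required
    .setPosition(0)                                             // required
    .setSortOrder(2)                                            // required (system value, illustrative)
    .setExpression("UPPER(NAME)")                               // new optional field 12
    .build();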

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index e3ee9e8..8a6b8d0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -150,6 +150,12 @@ public enum SQLExceptionCode {
      READ_ONLY_CONNECTION(518,"25502","Mutations are not permitted for a read-only connection."),
  
      VARBINARY_ARRAY_NOT_SUPPORTED(519, "42896", "VARBINARY ARRAY is not supported"),
+    
+     /**
+      *  Expression Index exceptions.
+      */
+     AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX(520, "42897", "Aggregate expressions are not allowed in an index"),
+     NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX(521, "42898", "Non-deterministic expressions are not allowed in an index"),
 
      /** 
      * HBase and Phoenix specific implementation defined sub-classes.
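
These codes would presumably be raised at index-compile time through Phoenix's usual SQLExceptionInfo builder; a hedged sketch of an assumed usage site (the surrounding check is invented):

if (expressionIsAggregate) { // hypothetical compile-time check
    throw new SQLExceptionInfo.Builder(
            SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX)
        .setMessage(expressionStr).build().buildException();
}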

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index b58de50..94233c8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -258,7 +258,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
             }
         }
         ImmutableBytesWritable ptr = new ImmutableBytesWritable();
-        IndexMaintainer.serialize(dataTable, ptr, indexes);
+        IndexMaintainer.serialize(dataTable, ptr, indexes, context.getConnection());
         scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD, ByteUtil.copyKeyBytesIfNecessary(ptr));
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index cfa58fd..04626a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -225,7 +225,7 @@ public class MutationState implements SQLCloseable {
                 try {
                     indexMutations =
                             IndexUtil.generateIndexData(tableRef.getTable(), index, mutationsPertainingToIndex,
-                                tempPtr, connection.getKeyValueBuilder());
+                                tempPtr, connection.getKeyValueBuilder(), connection);
                 } catch (SQLException e) {
                     throw new IllegalDataException(e);
                 }
@@ -368,7 +368,7 @@ public class MutationState implements SQLCloseable {
             Map<ImmutableBytesPtr,Map<PColumn,byte[]>> valuesMap = entry.getValue();
             TableRef tableRef = entry.getKey();
             PTable table = tableRef.getTable();
-            table.getIndexMaintainers(tempPtr);
+            table.getIndexMaintainers(tempPtr, connection);
             boolean hasIndexMaintainers = tempPtr.getLength() > 0;
             boolean isDataTable = true;
             long serverTimestamp = serverTimeStamps[i++];

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/expression/CoerceExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/CoerceExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/CoerceExpression.java
index 811ed47..b0396e8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/CoerceExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/CoerceExpression.java
@@ -85,7 +85,7 @@ public class CoerceExpression extends BaseSingleExpression {
     @Override
     public int hashCode() {
         final int prime = 31;
-        int result = 1;
+        int result = super.hashCode();
         result = prime * result + ((maxLength == null) ? 0 : maxLength.hashCode());
         result = prime * result + ((toSortOrder == null) ? 0 : toSortOrder.hashCode());
         result = prime * result + ((toType == null) ? 0 : toType.hashCode());
@@ -95,14 +95,16 @@ public class CoerceExpression extends BaseSingleExpression {
     @Override
     public boolean equals(Object obj) {
         if (this == obj) return true;
-        if (obj == null) return false;
+        if (!super.equals(obj)) return false;
         if (getClass() != obj.getClass()) return false;
         CoerceExpression other = (CoerceExpression)obj;
         if (maxLength == null) {
             if (other.maxLength != null) return false;
         } else if (!maxLength.equals(other.maxLength)) return false;
         if (toSortOrder != other.toSortOrder) return false;
-        if (toType != other.toType) return false;
+        if (toType == null) {
+            if (other.toType != null) return false;
+        } else if (!toType.equals(other.toType)) return false;
         return true;
     }
 
@@ -122,7 +124,7 @@ public class CoerceExpression extends BaseSingleExpression {
         WritableUtils.writeVInt(output, toSortOrder.getSystemValue());
         WritableUtils.writeVInt(output, maxLength == null ? -1 : maxLength);
     }
-
+    
     @Override
     public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
         if (getChild().evaluate(tuple, ptr)) {
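
The hashCode/equals change above is the standard pattern of folding superclass state (here, the child expression) into the subclass's identity, so two coercions over different children no longer compare equal. A generic standalone sketch with invented names:

class Base {
    private final Object child;
    Base(Object child) { this.child = child; }
    @Override public int hashCode() { return child.hashCode(); }
    @Override public boolean equals(Object o) {
        return o instanceof Base && child.equals(((Base) o).child);
    }
}

class Derived extends Base {
    private final Integer maxLength;
    Derived(Object child, Integer maxLength) { super(child); this.maxLength = maxLength; }
    @Override public int hashCode() {
        int result = super.hashCode(); // fold in the parent's (child-expression) state
        return 31 * result + (maxLength == null ? 0 : maxLength.hashCode());
    }
    @Override public boolean equals(Object o) {
        if (this == o) return true;
        if (!super.equals(o)) return false;       // also rejects null and child mismatch
        if (getClass() != o.getClass()) return false;
        Derived other = (Derived) o;
        return maxLength == null ? other.maxLength == null : maxLength.equals(other.maxLength);
    }
}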

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java
index 240d013..e4ec438 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java
@@ -23,10 +23,10 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.ByteUtil;
 
 
@@ -78,6 +78,10 @@ public class RowKeyColumnExpression  extends ColumnExpression {
     public int getPosition() {
         return accessor.getIndex();
     }
+    
+    public String getName() {
+        return name;
+    }
 
     @Override
     public int hashCode() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java
index 0e321a7..a6e36cb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java
@@ -33,4 +33,6 @@ public interface ValueGetter {
    * @throws IOException if there is an error accessing the underlying data storage
    */
   public ImmutableBytesPtr getLatestValue(ColumnReference ref) throws IOException;
+  
+  public byte[] getRowKey();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
index 43c4028..96a7410 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
@@ -84,4 +84,9 @@ public class LazyValueGetter implements ValueGetter {
     }
     return null;
   }
+
+  @Override
+  public byte[] getRowKey() {
+	return this.row; 
+  }
 }
\ No newline at end of file
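
Since every implementation must now also expose the row key, a minimal sketch of satisfying the extended contract (the captured valueMap and rowKey are assumed to be in scope, e.g. built from the pending updates the way the IndexManagementUtil helper removed below used to do):

ValueGetter getter = new ValueGetter() {
    @Override
    public ImmutableBytesPtr getLatestValue(ColumnReference ref) {
        return valueMap.get(ref); // assumed Map<ColumnReference, ImmutableBytesPtr>
    }
    @Override
    public byte[] getRowKey() {
        return rowKey; // lets expression-based maintainers evaluate PK-derived values
    }
};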

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index b4ba12d..dc72059 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -101,24 +101,6 @@ public class IndexManagementUtil {
 
     }
 
-    public static ValueGetter createGetterFromKeyValues(Collection<Cell> pendingUpdates) {
-        final Map<ReferencingColumn, ImmutableBytesPtr> valueMap = Maps.newHashMapWithExpectedSize(pendingUpdates
-                .size());
-        for (Cell kv : pendingUpdates) {
-            // create new pointers to each part of the kv
-            ImmutableBytesPtr family = new ImmutableBytesPtr(kv.getRowArray(),kv.getFamilyOffset(),kv.getFamilyLength());
-            ImmutableBytesPtr qual = new ImmutableBytesPtr(kv.getRowArray(), kv.getQualifierOffset(), kv.getQualifierLength());
-            ImmutableBytesPtr value = new ImmutableBytesPtr(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
-            valueMap.put(new ReferencingColumn(family, qual), value);
-        }
-        return new ValueGetter() {
-            @Override
-            public ImmutableBytesPtr getLatestValue(ColumnReference ref) throws IOException {
-                return valueMap.get(ReferencingColumn.wrap(ref));
-            }
-        };
-    }
-
     public static class ReferencingColumn {
         ImmutableBytesPtr family;
         ImmutableBytesPtr qual;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 61b6e68..31f6c76 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -23,6 +23,7 @@ import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -42,15 +43,27 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.phoenix.compile.ColumnResolver;
+import org.apache.phoenix.compile.FromCompiler;
+import org.apache.phoenix.compile.IndexExpressionCompiler;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.ExpressionType;
+import org.apache.phoenix.expression.KeyValueColumnExpression;
+import org.apache.phoenix.expression.visitor.KeyValueExpressionVisitor;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.hbase.index.util.IndexManagementUtil.ReferencingColumn;
+import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnFamily;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
@@ -59,8 +72,11 @@ import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.ValueSchema;
 import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.schema.tuple.ValueGetterTuple;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.BitSet;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
@@ -88,41 +104,15 @@ import com.google.common.collect.Sets;
  * @since 2.1.0
  */
 public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
-    
-    public static IndexMaintainer create(PTable dataTable, PTable index) {
+
+    private static final int EXPRESSION_NOT_PRESENT = -1;
+    private static final int ESTIMATED_EXPRESSION_SIZE = 8;
+
+    public static IndexMaintainer create(PTable dataTable, PTable index, PhoenixConnection connection) {
         if (dataTable.getType() == PTableType.INDEX || index.getType() != PTableType.INDEX || !dataTable.getIndexes().contains(index)) {
             throw new IllegalArgumentException();
         }
-        IndexMaintainer maintainer = new IndexMaintainer(dataTable, index);
-        int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (maintainer.isMultiTenant ? 1 : 0) + (maintainer.viewIndexId == null ? 0 : 1);
-        RowKeyMetaData rowKeyMetaData = maintainer.getRowKeyMetaData();
-        int indexColByteSize = 0;
-        for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) {
-            PColumn indexColumn = index.getPKColumns().get(i);
-            int indexPos = i - indexPosOffset;
-            PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
-            boolean isPKColumn = SchemaUtil.isPKColumn(column);
-            if (isPKColumn) {
-                int dataPkPos = dataTable.getPKColumns().indexOf(column) - (dataTable.getBucketNum() == null ? 0 : 1) - (maintainer.isMultiTenant ? 1 : 0);
-                rowKeyMetaData.setIndexPkPosition(dataPkPos, indexPos);
-            } else {
-                indexColByteSize += column.getDataType().isFixedWidth() ? SchemaUtil.getFixedByteSize(column) : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
-                maintainer.getIndexedColumnTypes().add(column.getDataType());
-                maintainer.getIndexedColumns().add(new ColumnReference(column.getFamilyName().getBytes(), column.getName().getBytes()));
-            }
-            if (indexColumn.getSortOrder() == SortOrder.DESC) {
-                rowKeyMetaData.getDescIndexColumnBitSet().set(indexPos);
-            }
-        }
-        for (int i = 0; i < index.getColumnFamilies().size(); i++) {
-            PColumnFamily family = index.getColumnFamilies().get(i);
-            for (PColumn indexColumn : family.getColumns()) {
-                PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
-                maintainer.getCoverededColumns().add(new ColumnReference(column.getFamilyName().getBytes(), column.getName().getBytes()));
-            }
-        }
-        maintainer.estimatedIndexRowKeyBytes = maintainer.estimateIndexRowKeyByteSize(indexColByteSize);
-        maintainer.initCachedState();
+        IndexMaintainer maintainer = new IndexMaintainer(dataTable, index, connection);
         return maintainer;
     }
     
@@ -158,9 +148,9 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
      * @param dataTable data table
      * @param ptr bytes pointer to hold returned serialized value
      */
-    public static void serialize(PTable dataTable, ImmutableBytesWritable ptr) {
+    public static void serialize(PTable dataTable, ImmutableBytesWritable ptr, PhoenixConnection connection) {
         List<PTable> indexes = dataTable.getIndexes();
-        serialize(dataTable, ptr, indexes);
+        serialize(dataTable, ptr, indexes, connection);
     }
 
     /**
@@ -170,7 +160,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
      * @param indexes indexes to serialize
      */
     public static void serialize(PTable dataTable, ImmutableBytesWritable ptr,
-            List<PTable> indexes) {
+            List<PTable> indexes, PhoenixConnection connection) {
         Iterator<PTable> indexesItr = nonDisabledIndexIterator(indexes.iterator());
         if ((dataTable.isImmutableRows()) || !indexesItr.hasNext()) {
             indexesItr = enabledLocalIndexIterator(indexesItr);
@@ -184,7 +174,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         while (indexesItr.hasNext()) {
             nIndexes++;
             PTable index = indexesItr.next();
-            estimatedSize += index.getIndexMaintainer(dataTable).getEstimatedByteSize();
+            estimatedSize += index.getIndexMaintainer(dataTable, connection).getEstimatedByteSize();
         }
         TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedSize + 1);
         DataOutput output = new DataOutputStream(stream);
@@ -197,7 +187,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
                     dataTable.isImmutableRows() ? enabledLocalIndexIterator(indexes.iterator())
                             : nonDisabledIndexIterator(indexes.iterator());
             while (indexesItr.hasNext()) {
-                    indexesItr.next().getIndexMaintainer(dataTable).write(output);
+                    indexesItr.next().getIndexMaintainer(dataTable, connection).write(output);
             }
         } catch (IOException e) {
             throw new RuntimeException(e); // Impossible
@@ -238,9 +228,14 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
 
     private byte[] viewIndexId;
     private boolean isMultiTenant;
+    // indexed expressions that are not present in the row key of the data table; an expression may also refer to a regular column
+    private List<Expression> indexedExpressions;
+    // columns required to evaluate all expressions in indexedExpressions (this does not include columns in the data row key)
     private Set<ColumnReference> indexedColumns;
     private Set<ColumnReference> coveredColumns;
+    // columns required to create the index row, i.e. indexedColumns + coveredColumns (this does not include columns in the data row key)
     private Set<ColumnReference> allColumns;
+    // TODO remove this in the next major release
     private List<PDataType> indexedColumnTypes;
     private RowKeyMetaData rowKeyMetaData;
     private byte[] indexTableName;
@@ -258,6 +253,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
     
     private List<ImmutableBytesPtr> indexQualifiers;
     private int estimatedIndexRowKeyBytes;
+    private int estimatedExpressionSize;
     private int[] dataPkPosition;
     private int maxTrailingNulls;
     private ColumnReference dataEmptyKeyValueRef;
@@ -267,32 +263,43 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         this.isDataTableSalted = isDataTableSalted;
     }
 
-    private IndexMaintainer(PTable dataTable, PTable index) {
+    private IndexMaintainer(PTable dataTable, PTable index, PhoenixConnection connection) {
         this(dataTable.getRowKeySchema(), dataTable.getBucketNum() != null);
         this.isMultiTenant = dataTable.isMultiTenant();
         this.viewIndexId = index.getViewIndexId() == null ? null : MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId());
         this.isLocalIndex = index.getIndexType() == IndexType.LOCAL;
 
-        RowKeySchema dataRowKeySchema = dataTable.getRowKeySchema();
-        boolean isDataTableSalted = dataTable.getBucketNum() != null;
         byte[] indexTableName = index.getPhysicalName().getBytes();
         // Use this for the nDataSaltBuckets as we need this for local indexes
         // TODO: persist nDataSaltBuckets separately, but maintain b/w compat.
         Integer nIndexSaltBuckets = isLocalIndex ? dataTable.getBucketNum() : index.getBucketNum();
         boolean indexWALDisabled = index.isWALDisabled();
         int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (this.isMultiTenant ? 1 : 0) + (this.viewIndexId == null ? 0 : 1);
+//        int indexPosOffset = !isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0;
         int nIndexColumns = index.getColumns().size() - indexPosOffset;
         int nIndexPKColumns = index.getPKColumns().size() - indexPosOffset;
-        int indexedColumnsCount = 0;
-        for (int i  = indexPosOffset; i<index.getPKColumns().size();i++) {
-            PColumn indexColumn = index.getPKColumns().get(i);
-            PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
-            boolean isPKColumn = SchemaUtil.isPKColumn(column);
-            if (!isPKColumn) {
-                indexedColumnsCount++;
-            } 
+        // number of indexed expressions that are not present in the row key of the data table
+        int indexedExpressionCount = 0;
+        for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) {
+            PColumn indexColumn = index.getPKColumns().get(i);
+            if (!IndexUtil.isIndexColumn(indexColumn)) {
+                continue;
+            }
+            String indexColumnName = indexColumn.getName().getString();
+            String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColumnName);
+            String dataColumnName = IndexUtil.getDataColumnName(indexColumnName);
+            try {
+                PColumn dataColumn = dataFamilyName.equals("") ? dataTable.getColumn(dataColumnName) : dataTable.getColumnFamily(dataFamilyName).getColumn(dataColumnName);
+                if (SchemaUtil.isPKColumn(dataColumn))
+                    continue;
+            } catch (ColumnNotFoundException e) {
+                // this column must be an expression, not a column reference
+            } catch (Exception e) {
+                throw new IllegalArgumentException(e);
+            }
+            indexedExpressionCount++;
+        }
-        int indexPkColumnCount = this.dataRowKeySchema.getFieldCount() + indexedColumnsCount - (isDataTableSalted ? 1 : 0) - (isMultiTenant ? 1 : 0);
+        int indexPkColumnCount = this.dataRowKeySchema.getFieldCount() + indexedExpressionCount  - (this.isDataTableSalted ? 1 : 0) - (this.isMultiTenant ? 1 : 0);
         this.rowKeyMetaData = newRowKeyMetaData(indexPkColumnCount);
         BitSet bitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
 
@@ -312,12 +319,9 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
             }
         }
         this.indexTableName = indexTableName;
-        this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexPKColumns-nDataPKColumns);
         this.indexedColumnTypes = Lists.<PDataType>newArrayListWithExpectedSize(nIndexPKColumns-nDataPKColumns);
+        this.indexedExpressions = Lists.newArrayListWithExpectedSize(nIndexPKColumns-nDataPKColumns);
         this.coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexColumns-nIndexPKColumns);
-        this.allColumns = Sets.newLinkedHashSetWithExpectedSize(nDataPKColumns + nIndexColumns);
-        this.allColumns.addAll(indexedColumns);
-        this.allColumns.addAll(coveredColumns);
         this.nIndexSaltBuckets  = nIndexSaltBuckets == null ? 0 : nIndexSaltBuckets;
         this.dataEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(dataTable);
         this.emptyKeyValueCFPtr = SchemaUtil.getEmptyColumnFamilyPtr(index);
@@ -326,6 +330,60 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         // TODO: check whether index is immutable or not. Currently it's always false so checking
         // data table is with immutable rows or not.
         this.immutableRows = dataTable.isImmutableRows();
+        int indexColByteSize = 0;
+        ColumnResolver resolver = null;
+        try {
+            resolver = FromCompiler.getResolver(new TableRef(dataTable));
+        } catch (SQLException e) {
+            throw new RuntimeException(e); // Impossible
+        }
+        StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver);
+        IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
+        for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) {
+            PColumn indexColumn = index.getPKColumns().get(i);
+            if (!IndexUtil.isIndexColumn(indexColumn)) {
+                continue;
+            }
+            int indexPos = i - indexPosOffset;
+            Expression expression = null;
+            try {
+                expressionIndexCompiler.reset();
+                ParseNode parseNode = SQLParser.parseCondition(indexColumn.getExpressionStr());
+                expression = parseNode.accept(expressionIndexCompiler);
+            } catch (SQLException e) {
+                throw new RuntimeException(e); // Impossible
+            }
+            if (expressionIndexCompiler.getColumnRef() != null) {
+                // get the column of the data table that corresponds to this index column
+                PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
+                boolean isPKColumn = SchemaUtil.isPKColumn(column);
+                if (isPKColumn) {
+                    int dataPkPos = dataTable.getPKColumns().indexOf(column) - (dataTable.getBucketNum() == null ? 0 : 1) - (this.isMultiTenant ? 1 : 0);
+                    this.rowKeyMetaData.setIndexPkPosition(dataPkPos, indexPos);
+                } else {
+                    indexColByteSize += column.getDataType().isFixedWidth() ? SchemaUtil.getFixedByteSize(column) : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
+                    this.indexedExpressions.add(expression);
+                }
+            }
+            else {
+                indexColByteSize += expression.getDataType().isFixedWidth() ? SchemaUtil.getFixedByteSize(expression) : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
+                this.indexedExpressions.add(expression);
+            }
+            // remember whether this index column sorts descending
+            if (indexColumn.getSortOrder() == SortOrder.DESC) {
+                this.rowKeyMetaData.getDescIndexColumnBitSet().set(indexPos);
+            }
+        }
+        this.estimatedExpressionSize = expressionIndexCompiler.getTotalNodeCount() * ESTIMATED_EXPRESSION_SIZE;
+        for (int i = 0; i < index.getColumnFamilies().size(); i++) {
+            PColumnFamily family = index.getColumnFamilies().get(i);
+            for (PColumn indexColumn : family.getColumns()) {
+                PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
+                this.coveredColumns.add(new ColumnReference(column.getFamilyName().getBytes(), column.getName().getBytes()));
+            }
+        }
+        this.estimatedIndexRowKeyBytes = estimateIndexRowKeyByteSize(indexColByteSize);
+        initCachedState();
     }
 
     public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey)  {
@@ -388,30 +446,26 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
                 } 
             }
             BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
-            int j = 0;
-            Iterator<ColumnReference> iterator = indexedColumns.iterator();
+            Iterator<Expression> expressionIterator = indexedExpressions.iterator();
             for (int i = 0; i < nIndexedColumns; i++) {
                 PDataType dataColumnType;
-                boolean isNullable = true;
-                boolean isDataColumnInverted = false;
-                SortOrder dataSortOrder = SortOrder.getDefault();
-                if (dataPkPosition[i] == -1) {
-                    dataColumnType = indexedColumnTypes.get(j);
-                    ImmutableBytesPtr value = valueGetter.getLatestValue(iterator.next());
-                    if (value == null) {
-                        ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
-                    } else {
-                        ptr.set(value.copyBytesIfNecessary());
-                    }
-                    j++;
-               } else {
+                boolean isNullable;
+                SortOrder dataSortOrder;
+                if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
+                    Expression expression = expressionIterator.next();
+                    dataColumnType = expression.getDataType();
+                    dataSortOrder = expression.getSortOrder();
+                    isNullable = expression.isNullable();
+                    expression.evaluate(new ValueGetterTuple(valueGetter), ptr);
+                }
+                else {
                     Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                     dataColumnType = field.getDataType();
                     ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                     dataSortOrder = field.getSortOrder();
-                    isDataColumnInverted = dataSortOrder != SortOrder.ASC;
                     isNullable = field.isNullable();
                 }
+                boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
                 PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
                 boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType) ;
                 if (isBytesComparable && isDataColumnInverted == descIndexColumnBitSet.get(i)) {
@@ -643,10 +697,10 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
                 indexFields[pos] = dataRowKeySchema.getField(i);
             } 
         }
-        int indexedColumnTypesIndex = 0;
+        Iterator<Expression> expressionSetItr = indexedExpressions.iterator();
         for (Field indexField : indexFields) {
             if (indexField == null) { // Add field for kv column in index
-                final PDataType dataType = indexedColumnTypes.get(indexedColumnTypesIndex++);
+                final PDataType dataType = expressionSetItr.next().getDataType();
                 builder.addField(new PDatum() {
 
                     @Override
@@ -823,10 +877,6 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         return coveredColumns;
     }
 
-    public Set<ColumnReference> getIndexedColumns() {
-        return indexedColumns;
-    }
-
     public Set<ColumnReference> getAllColumns() {
         return allColumns;
     }
@@ -838,14 +888,6 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         return emptyKeyValueCFPtr;
     }
 
-    private RowKeyMetaData getRowKeyMetaData() {
-        return rowKeyMetaData;
-    }
-    
-    private List<PDataType> getIndexedColumnTypes() {
-        return indexedColumnTypes;
-    }
-
     @Override
     public void readFields(DataInput input) throws IOException {
         int encodedIndexSaltBucketsAndMultiTenant = WritableUtils.readVInt(input);
@@ -881,7 +923,62 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         }
         indexTableName = Bytes.readByteArray(input);
         dataEmptyKeyValueCF = Bytes.readByteArray(input);
-        emptyKeyValueCFPtr = new ImmutableBytesPtr(Bytes.readByteArray(input));
+        int len = WritableUtils.readVInt(input);
+        // TODO remove this in the next major release
+        boolean isNewClient = false;
+        if (len < 0) {
+            isNewClient = true;
+            len = Math.abs(len);
+        }
+        byte[] emptyKeyValueCF = new byte[len];
+        input.readFully(emptyKeyValueCF, 0, len);
+        emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueCF);
+        
+        if (isNewClient) {
+            int numIndexedExpressions = WritableUtils.readVInt(input);
+            indexedExpressions = Lists.newArrayListWithExpectedSize(numIndexedExpressions);
+            for (int i = 0; i < numIndexedExpressions; i++) {
+                Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
+                expression.readFields(input);
+                indexedExpressions.add(expression);
+            }
+        }
+        else {
+            indexedExpressions = Lists.newArrayListWithExpectedSize(indexedColumns.size());
+            Iterator<ColumnReference> colReferenceIter = indexedColumns.iterator();
+            Iterator<PDataType> dataTypeIter = indexedColumnTypes.iterator();
+            while (colReferenceIter.hasNext()) {
+                ColumnReference colRef = colReferenceIter.next();
+                final PDataType dataType = dataTypeIter.next();
+                indexedExpressions.add(new KeyValueColumnExpression(new PDatum() {
+                    
+                    @Override
+                    public boolean isNullable() {
+                        return true;
+                    }
+                    
+                    @Override
+                    public SortOrder getSortOrder() {
+                        return SortOrder.getDefault();
+                    }
+                    
+                    @Override
+                    public Integer getScale() {
+                        return null;
+                    }
+                    
+                    @Override
+                    public Integer getMaxLength() {
+                        return null;
+                    }
+                    
+                    @Override
+                    public PDataType getDataType() {
+                        return dataType;
+                    }
+                }, colRef.getFamily(), colRef.getQualifier()));
+            }
+        }
         
         rowKeyMetaData = newRowKeyMetaData();
         rowKeyMetaData.readFields(input);
@@ -908,6 +1005,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
             Bytes.writeByteArray(output, ref.getFamily());
             Bytes.writeByteArray(output, ref.getQualifier());
         }
+        // TODO remove indexedColumnTypes in the next major release
         for (int i = 0; i < indexedColumnTypes.size(); i++) {
             PDataType type = indexedColumnTypes.get(i);
             WritableUtils.writeVInt(output, type.ordinal());
@@ -920,9 +1018,17 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         }
         Bytes.writeByteArray(output, indexTableName);
         Bytes.writeByteArray(output, dataEmptyKeyValueCF);
-        WritableUtils.writeVInt(output,emptyKeyValueCFPtr.getLength());
+        // To maintain b/w compatibility, encode emptyKeyValueCFPtr.getLength() as a negative value (so we can distinguish between new and old clients)
+        // TODO: when indexedColumnTypes is removed, remove this
+        WritableUtils.writeVInt(output,-emptyKeyValueCFPtr.getLength());
         output.write(emptyKeyValueCFPtr.get(),emptyKeyValueCFPtr.getOffset(), emptyKeyValueCFPtr.getLength());
         
+        WritableUtils.writeVInt(output, indexedExpressions.size());
+        for (Expression expression : indexedExpressions) {
+            WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
+            expression.write(output);
+        }
+        
         rowKeyMetaData.write(output);
         // Encode indexWALDisabled in nDataCFs
         WritableUtils.writeVInt(output, (nDataCFs + 1) * (indexWALDisabled ? -1 : 1));
@@ -941,7 +1047,10 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
             size += WritableUtils.getVIntSize(ref.getQualifier().length);
             size += ref.getQualifier().length;
         }
-        size += indexedColumnTypes.size();
+        for (int i = 0; i < indexedColumnTypes.size(); i++) {
+            PDataType type = indexedColumnTypes.get(i);
+            size += WritableUtils.getVIntSize(type.ordinal());
+        }
         size += WritableUtils.getVIntSize(coveredColumns.size());
         for (ColumnReference ref : coveredColumns) {
             size += WritableUtils.getVIntSize(ref.getFamily().length);
@@ -954,13 +1063,18 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         size += dataEmptyKeyValueCF.length + WritableUtils.getVIntSize(dataEmptyKeyValueCF.length);
         size += emptyKeyValueCFPtr.getLength() + WritableUtils.getVIntSize(emptyKeyValueCFPtr.getLength());
         size += WritableUtils.getVIntSize(nDataCFs+1);
+        size += WritableUtils.getVIntSize(indexedExpressions.size());
+        for (Expression expression : indexedExpressions) {
+            size += WritableUtils.getVIntSize(ExpressionType.valueOf(expression).ordinal());
+        }
+        size += estimatedExpressionSize;
         return size;
     }
     
     private int estimateIndexRowKeyByteSize(int indexColByteSize) {
         int estimatedIndexRowKeyBytes = indexColByteSize + dataRowKeySchema.getEstimatedValueLength() + (nIndexSaltBuckets == 0 || isLocalIndex || this.isDataTableSalted ? 0 : SaltingUtil.NUM_SALTING_BYTES);
         return estimatedIndexRowKeyBytes;
-   }
+    }
     
     /**
      * Init calculated state reading/creating
@@ -976,20 +1090,33 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
                 ref.getFamily(), ref.getQualifier())));
         }
 
-        this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedColumns.size() + coveredColumns.size());
+        this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + coveredColumns.size());
+        // columns that are required to evaluate all expressions in indexedExpressions (not including columns in data row key)
+        this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size());
+        for (Expression expression : indexedExpressions) {
+            KeyValueExpressionVisitor visitor = new KeyValueExpressionVisitor() {
+                @Override
+                public Void visit(KeyValueColumnExpression expression) {
+                    indexedColumns.add(new ColumnReference(expression.getColumnFamily(), expression.getColumnName()));
+                    indexedColumnTypes.add(expression.getDataType());
+                    return null;
+                }
+            };
+            expression.accept(visitor);
+        }
         allColumns.addAll(indexedColumns);
         allColumns.addAll(coveredColumns);
         
         int dataPkOffset = (isDataTableSalted ? 1 : 0) + (isMultiTenant ? 1 : 0);
         int nIndexPkColumns = getIndexPkColumnCount();
         dataPkPosition = new int[nIndexPkColumns];
-        Arrays.fill(dataPkPosition, -1);
+        Arrays.fill(dataPkPosition, EXPRESSION_NOT_PRESENT);
         int numViewConstantColumns = 0;
         BitSet viewConstantColumnBitSet = rowKeyMetaData.getViewConstantColumnBitSet();
         for (int i = dataPkOffset; i < dataRowKeySchema.getFieldCount(); i++) {
             if (!viewConstantColumnBitSet.get(i)) {
-                int dataPkPosition = rowKeyMetaData.getIndexPkPosition(i-dataPkOffset);
-                this.dataPkPosition[dataPkPosition] = i;
+                int indexPkPosition = rowKeyMetaData.getIndexPkPosition(i-dataPkOffset);
+                this.dataPkPosition[indexPkPosition] = i;
             } else {
                 numViewConstantColumns++;
             }
@@ -998,15 +1125,15 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         // Calculate the max number of trailing nulls that we should get rid of after building the index row key.
         // We only get rid of nulls for variable length types, so we have to be careful to consider the type of the
         // index table, not the data type of the data table
-        int indexedColumnTypesPos = indexedColumnTypes.size()-1;
+        int expressionsPos = indexedExpressions.size();
         int indexPkPos = nIndexPkColumns - numViewConstantColumns - 1;
         while (indexPkPos >= 0) {
             int dataPkPos = dataPkPosition[indexPkPos];
             boolean isDataNullable;
             PDataType dataType;
-            if (dataPkPos == -1) {
+            if (dataPkPos == EXPRESSION_NOT_PRESENT) {
                 isDataNullable = true;
-                dataType = indexedColumnTypes.get(indexedColumnTypesPos--);
+                dataType = indexedExpressions.get(--expressionsPos).getDataType();
             } else {
                 Field dataField = dataRowKeySchema.getField(dataPkPos);
                 dataType = dataField.getDataType();
@@ -1022,7 +1149,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
     }
 
     private int getIndexPkColumnCount() {
-        return dataRowKeySchema.getFieldCount() + indexedColumns.size() - (isDataTableSalted ? 1 : 0) - (isMultiTenant ? 1 : 0);
+        return dataRowKeySchema.getFieldCount() + indexedExpressions.size() - (isDataTableSalted ? 1 : 0) - (isMultiTenant ? 1 : 0);
     }
     
     private RowKeyMetaData newRowKeyMetaData() {
@@ -1178,7 +1305,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         return allColumns.iterator();
     }
 
-    public ValueGetter createGetterFromKeyValues(Collection<? extends Cell> pendingUpdates) {
+    public ValueGetter createGetterFromKeyValues(final byte[] rowKey, Collection<? extends Cell> pendingUpdates) {
         final Map<ReferencingColumn, ImmutableBytesPtr> valueMap = Maps.newHashMapWithExpectedSize(pendingUpdates
                 .size());
         for (Cell kv : pendingUpdates) {
@@ -1190,10 +1317,14 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         }
         return new ValueGetter() {
             @Override
-            public ImmutableBytesPtr getLatestValue(ColumnReference ref) throws IOException {
+            public ImmutableBytesPtr getLatestValue(ColumnReference ref) {
                 if(ref.equals(dataEmptyKeyValueRef)) return null;
                 return valueMap.get(ReferencingColumn.wrap(ref));
             }
+            @Override
+            public byte[] getRowKey() {
+                return rowKey;
+            }
         };
     }
 
@@ -1208,4 +1339,8 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
     public boolean isImmutableRows() {
         return immutableRows;
     }
+    
+    public Set<ColumnReference> getIndexedColumns() {
+        return indexedColumns;
+    }
 }

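One detail of the serialization changes above deserves a note: write() negates the vint length of the empty key value CF, and readFields() treats a negative length as the marker of a new client, meaning serialized index expressions follow instead of the legacy column references and types. A stand-alone sketch of the same sign trick, using only the Hadoop WritableUtils API (class name hypothetical; it assumes a non-empty byte array, since a zero length cannot carry a sign):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    import org.apache.hadoop.io.WritableUtils;

    class LengthSignDemo {
        // New writers negate the length; old writers wrote it non-negative.
        static void write(DataOutput out, byte[] cf) throws IOException {
            WritableUtils.writeVInt(out, -cf.length);
            out.write(cf);
        }

        static byte[] read(DataInput in) throws IOException {
            int len = WritableUtils.readVInt(in);
            boolean isNewClient = len < 0; // negative length marks the new format
            len = Math.abs(len);
            byte[] cf = new byte[len];
            in.readFully(cf, 0, len);
            // isNewClient would decide whether serialized expressions follow next
            return cf;
        }
    }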
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
index 48a7868..99e26d1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
@@ -154,7 +154,7 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
                 } else {
                     indexUpdate.setTable(maintainer.getIndexTableName());
                 }
-                valueGetter = maintainer.createGetterFromKeyValues(state.getPendingUpdate());
+                valueGetter = maintainer.createGetterFromKeyValues(dataRowKey, state.getPendingUpdate());
             } else {
                 // TODO: if more efficient, I could do this just once with all columns in all indexes
                 Pair<Scanner,IndexUpdate> statePair = state.getIndexedColumnsTableState(maintainer.getAllColumns());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 1c98c5c..2fd168a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -19,7 +19,13 @@ package org.apache.phoenix.index;
 
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.*;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,28 +43,27 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-
-import com.google.common.collect.Multimap;
-
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
-import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
-import org.apache.phoenix.hbase.index.write.KillServerOnFailurePolicy;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
+import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.apache.phoenix.hbase.index.write.KillServerOnFailurePolicy;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.protobuf.ProtobufUtil;
 import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
+import com.google.common.collect.Multimap;
+
 /**
  * 
  * Handler called in the event that index updates cannot be written to their
@@ -219,7 +224,7 @@ public class PhoenixIndexFailurePolicy extends  KillServerOnFailurePolicy {
                 return Collections.emptySet();
             }
 
-            IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable);
+            IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn);
             HRegionInfo regionInfo = this.env.getRegion().getRegionInfo();
             int offset =
                     regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 76a1ad1..b26f408 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -166,6 +166,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final String NULLABLE = "NULLABLE";
     public static final byte[] NULLABLE_BYTES = Bytes.toBytes(NULLABLE);
     public static final String COLUMN_DEF = "COLUMN_DEF";
+    public static final byte[] COLUMN_DEF_BYTES = Bytes.toBytes(COLUMN_DEF);
     public static final String SQL_DATA_TYPE = "SQL_DATA_TYPE";
     public static final String SQL_DATETIME_SUB = "SQL_DATETIME_SUB";
     public static final String CHAR_OCTET_LENGTH = "CHAR_OCTET_LENGTH";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 93212bc..4ca5bb5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -81,6 +81,7 @@ import org.apache.phoenix.parse.DropTableStatement;
 import org.apache.phoenix.parse.ExplainStatement;
 import org.apache.phoenix.parse.FilterableStatement;
 import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.parse.IndexKeyConstraint;
 import org.apache.phoenix.parse.LimitNode;
 import org.apache.phoenix.parse.NamedNode;
 import org.apache.phoenix.parse.NamedTableNode;
@@ -521,9 +522,9 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
 
     private static class ExecutableCreateIndexStatement extends CreateIndexStatement implements CompilableStatement {
 
-        public ExecutableCreateIndexStatement(NamedNode indexName, NamedTableNode dataTable, PrimaryKeyConstraint pkConstraint, List<ColumnName> includeColumns, List<ParseNode> splits,
+        public ExecutableCreateIndexStatement(NamedNode indexName, NamedTableNode dataTable, IndexKeyConstraint ikConstraint, List<ColumnName> includeColumns, List<ParseNode> splits,
                 ListMultimap<String,Pair<String,Object>> props, boolean ifNotExists, IndexType indexType, int bindCount) {
-            super(indexName, dataTable, pkConstraint, includeColumns, splits, props, ifNotExists, indexType, bindCount);
+            super(indexName, dataTable, ikConstraint, includeColumns, splits, props, ifNotExists, indexType, bindCount);
         }
 
         @SuppressWarnings("unchecked")
@@ -852,9 +853,9 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         }
         
         @Override
-        public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, PrimaryKeyConstraint pkConstraint, List<ColumnName> includeColumns, List<ParseNode> splits,
+        public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, IndexKeyConstraint ikConstraint, List<ColumnName> includeColumns, List<ParseNode> splits,
                 ListMultimap<String,Pair<String,Object>> props, boolean ifNotExists, IndexType indexType, int bindCount) {
-            return new ExecutableCreateIndexStatement(indexName, dataTable, pkConstraint, includeColumns, splits, props, ifNotExists, indexType, bindCount);
+            return new ExecutableCreateIndexStatement(indexName, dataTable, ikConstraint, includeColumns, splits, props, ifNotExists, indexType, bindCount);
         }
         
         @Override

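Stepping back from the individual hunks: the parser, statement, and IndexMaintainer changes in this commit together allow index keys to be built from expressions rather than bare columns. A hypothetical end-to-end use over JDBC (table, index, and connection URL are illustrative, not taken from this patch):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class FunctionalIndexDemo {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE EMP (ID INTEGER PRIMARY KEY, NAME VARCHAR)");
                // Index on an expression, not a bare column
                stmt.execute("CREATE INDEX UPPER_NAME_IDX ON EMP (UPPER(NAME))");
                // A query over the same expression can now be served from the index
                try (ResultSet rs = stmt.executeQuery(
                        "SELECT ID FROM EMP WHERE UPPER(NAME) = 'JOHN'")) {
                    while (rs.next()) {
                        System.out.println(rs.getInt(1));
                    }
                }
            }
        }
    }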

[05/50] [abbrv] phoenix git commit: PHOENIX-1616 Creating a View with a case sensitive column name does not work (Thomas D'Silva)

Posted by ma...@apache.org.
PHOENIX-1616 Creating a View with a case sensitive column name does not work (Thomas D'Silva)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/03a5d7ef
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/03a5d7ef
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/03a5d7ef

Branch: refs/heads/calcite
Commit: 03a5d7ef198f059b289a7195613db8d375b88e46
Parents: a1baf2a
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Jan 31 11:17:46 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Jan 31 11:17:46 2015 -0800

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 84 ++++++++---------
 .../phoenix/end2end/HashJoinLocalIndexIT.java   |  4 +-
 .../apache/phoenix/end2end/SortMergeJoinIT.java | 22 ++---
 .../org/apache/phoenix/end2end/SubqueryIT.java  | 70 +++++++-------
 .../end2end/SubqueryUsingSortMergeJoinIT.java   | 96 ++++++++++----------
 .../java/org/apache/phoenix/end2end/ViewIT.java | 27 ++++++
 .../index/GlobalIndexOptimizationIT.java        | 18 ++--
 .../phoenix/compile/ExpressionCompiler.java     |  2 +-
 .../apache/phoenix/parse/ColumnParseNode.java   |  4 +
 .../org/apache/phoenix/parse/TableName.java     | 24 +++--
 .../org/apache/phoenix/schema/ColumnRef.java    |  8 +-
 .../phoenix/schema/LocalIndexDataColumnRef.java |  2 +-
 .../org/apache/phoenix/schema/TableRef.java     | 37 ++++----
 .../java/org/apache/phoenix/util/IndexUtil.java |  3 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |  8 +-
 15 files changed, 225 insertions(+), 184 deletions(-)
----------------------------------------------------------------------

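A sketch of the scenario this commit fixes, against the Phoenix JDBC driver (identifiers and URL are illustrative; double-quoting an identifier makes it case sensitive, which is also why the EXPLAIN plan expectations in the IT diffs below gain escaped quotes around such names):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class CaseSensitiveViewDemo {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE t (\"k\" INTEGER PRIMARY KEY, \"v\" VARCHAR)");
                // Before this fix, resolving the quoted, case-sensitive column in
                // the view definition failed; with it, the view can be created.
                stmt.execute("CREATE VIEW v1 AS SELECT * FROM t WHERE \"v\" = 'a'");
            }
        }
    }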

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index a699d48..781bfea 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -127,7 +127,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     GROUP BY i.item_id ORDER BY q DESC"
                  */     
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.item_id]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
@@ -141,9 +141,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */     
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n" +
+                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"I.item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n" +
+                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, \"I.item_id\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /* 
@@ -166,9 +166,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n" +
+                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"I.item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n" +
+                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, \"I.item_id\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /*
@@ -218,7 +218,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "            SERVER FILTER BY QUANTITY < 5000\n" +
                 "    PARALLEL INNER-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (O.item_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i2.item_id, i1.name FROM joinItemTable i1 
@@ -229,7 +229,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (I2.item_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -241,7 +241,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (I2.supplier_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.supplier_id\")",
                 /*
                  * testStarJoin()
                  *     SELECT order_id, c.name, i.name iname, quantity, o.date 
@@ -264,13 +264,13 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     ORDER BY order_id
                  */
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER SORTED BY [O.order_id]\n" +
+                "    SERVER SORTED BY [\"O.order_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (O.item_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")",
                 /*
                  * testSubJoin()
                  *     SELECT * FROM joinCustomerTable c 
@@ -285,7 +285,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     ORDER BY c.customer_id, i.name
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + " [*] - ['0000000005']\n" +
-                "    SERVER SORTED BY [C.customer_id, I.NAME]\n" +
+                "    SERVER SORTED BY [\"C.customer_id\", I.NAME]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -295,7 +295,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                    SERVER FILTER BY NAME != 'T3'\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY customer_id IN (O.customer_id)",
+                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN (\"O.customer_id\")",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
                  *     SELECT i.name, sum(quantity) FROM joinOrderTable o 
@@ -338,7 +338,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
@@ -354,7 +354,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 /*
                  * testNestedSubqueries()
@@ -415,7 +415,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY supplier_id IN (I.supplier_id)\n" +
+                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.supplier_id\")\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()
@@ -491,7 +491,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     GROUP BY i.item_id ORDER BY q DESC"
                  */     
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.:item_id]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.:item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
@@ -505,9 +505,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */     
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n" +
+                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"I.item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n" +
+                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, \"I.item_id\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /* 
@@ -530,9 +530,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n" +
+                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"I.item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n" +
+                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, \"I.item_id\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /*
@@ -592,7 +592,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (I2.:item_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.:item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -630,7 +630,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER SORTED BY [O.order_id]\n" +
+                "    SERVER SORTED BY [\"O.order_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -651,7 +651,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     ORDER BY c.customer_id, i.name
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + " [*] - ['0000000005']\n" +
-                "    SERVER SORTED BY [C.customer_id, I.0:NAME]\n" +
+                "    SERVER SORTED BY [\"C.customer_id\", I.0:NAME]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -661,7 +661,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                    SERVER FILTER BY NAME != 'T3'\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY customer_id IN (O.customer_id)",
+                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN (\"O.customer_id\")",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
                  *     SELECT i.name, sum(quantity) FROM joinOrderTable o 
@@ -705,7 +705,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
@@ -721,7 +721,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 /*
                  * testNestedSubqueries()
@@ -782,7 +782,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_SCHEMA + ".idx_item\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY supplier_id IN (I.0:supplier_id)\n" +
+                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.0:supplier_id\")\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()
@@ -859,7 +859,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     GROUP BY i.item_id ORDER BY q DESC"
                  */     
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.:item_id]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.:item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
@@ -874,9 +874,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */     
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n" +
+                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"I.item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n" +
+                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, \"I.item_id\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /* 
@@ -900,9 +900,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n" +
+                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"I.item_id\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n" +
+                "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, \"I.item_id\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /*
@@ -958,7 +958,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (O.item_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i2.item_id, i1.name FROM joinItemTable i1 
@@ -970,7 +970,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER "+ MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n"  +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (I2.:item_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.:item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -984,7 +984,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (I2.0:supplier_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.0:supplier_id\")",
                 /*
                  * testStarJoin()
                  *     SELECT order_id, c.name, i.name iname, quantity, o.date 
@@ -1012,7 +1012,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER SORTED BY [O.order_id]\n"+
+                "    SERVER SORTED BY [\"O.order_id\"]\n"+
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -1020,7 +1020,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_CUSTOMER_TABLE_DISPLAY_NAME+" [-32768]\n"+
                 "                    SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "                CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN (O.item_id)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")",
                 /*
                  * testSubJoin()
                  *     SELECT * FROM joinCustomerTable c 
@@ -1035,7 +1035,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     ORDER BY c.customer_id, i.name
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + " [*] - ['0000000005']\n" +
-                "    SERVER SORTED BY [C.customer_id, I.0:NAME]\n"+
+                "    SERVER SORTED BY [\"C.customer_id\", I.0:NAME]\n"+
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -1046,7 +1046,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT MERGE SORT\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY customer_id IN (O.customer_id)",
+                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN (\"O.customer_id\")",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
                  *     SELECT i.name, sum(quantity) FROM joinOrderTable o 
@@ -1092,7 +1092,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
@@ -1108,7 +1108,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 /*
                  * testNestedSubqueries()
@@ -1172,7 +1172,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY supplier_id IN (I.0:supplier_id)\n" +
+                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.0:supplier_id\")\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()

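Note on the hunks above: they all make the same mechanical change, wrapping column references in the expected EXPLAIN output in double quotes (["item_id"] instead of [item_id]) to match how the planner now renders identifiers. A minimal sketch of reading such a plan over JDBC; the connection URL, table name, and column name here are illustrative stand-ins, not the suite's fixtures:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class ExplainPlanSketch {
        public static void main(String[] args) throws Exception {
            // Illustrative URL; point this at a real ZooKeeper quorum.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                ResultSet rs = conn.createStatement().executeQuery(
                        "EXPLAIN SELECT \"item_id\" FROM ITEMS GROUP BY \"item_id\"");
                StringBuilder plan = new StringBuilder();
                while (rs.next()) {
                    plan.append(rs.getString(1)).append('\n');
                }
                // Expect lines such as:
                //     SERVER AGGREGATE INTO DISTINCT ROWS BY ["item_id"]
                System.out.print(plan);
            }
        }
    }
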
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
index 6f47044..45e80c6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
@@ -105,7 +105,7 @@ public class HashJoinLocalIndexIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768,*] - [-32768,'T6']\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY supplier_id IN (I.0:supplier_id)",
+                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.0:supplier_id\")",
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768,'S1']\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 
@@ -115,7 +115,7 @@ public class HashJoinLocalIndexIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768,*] - [-32768,'T6']\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY supplier_id IN (I.0:supplier_id)",
+                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.0:supplier_id\")",
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768,*] - [-32768,'S3']\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
index 0d76082..4503b5b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
@@ -112,14 +112,14 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "AND\n" +
                 "    SORT-MERGE-JOIN (INNER) TABLES\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER SORTED BY [I.item_id]\n" +
+                "            SERVER SORTED BY [\"I.item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    AND (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER FILTER BY QUANTITY < 5000\n" +
-                "            SERVER SORTED BY [O.item_id]\n" +
+                "            SERVER SORTED BY [\"O.item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [I.supplier_id]",
+                "    CLIENT SORTED BY [\"I.supplier_id\"]",
                 }});
         testCases.add(new String[][] {
                 {
@@ -130,19 +130,19 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "SORT-MERGE-JOIN (LEFT) TABLES\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_supplier\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" + 
-                "        SERVER SORTED BY [S.:supplier_id]\n" +
+                "        SERVER SORTED BY [\"S.:supplier_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
                 "AND\n" +
                 "    SORT-MERGE-JOIN (INNER) TABLES\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
-                "            SERVER SORTED BY [I.:item_id]\n" +
+                "            SERVER SORTED BY [\"I.:item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    AND (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER FILTER BY QUANTITY < 5000\n" +
-                "            SERVER SORTED BY [O.item_id]\n" +
+                "            SERVER SORTED BY [\"O.item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [I.0:supplier_id]"
+                "    CLIENT SORTED BY [\"I.0:supplier_id\"]"
                 }});
         testCases.add(new String[][] {
                 {
@@ -153,19 +153,19 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "SORT-MERGE-JOIN (LEFT) TABLES\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" + 
-                "        SERVER SORTED BY [S.:supplier_id]\n" +
+                "        SERVER SORTED BY [\"S.:supplier_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
                 "AND\n" +
                 "    SORT-MERGE-JOIN (INNER) TABLES\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
-                "            SERVER SORTED BY [I.:item_id]\n" +
+                "            SERVER SORTED BY [\"I.:item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    AND (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER FILTER BY QUANTITY < 5000\n" +
-                "            SERVER SORTED BY [O.item_id]\n" +
+                "            SERVER SORTED BY [\"O.item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [I.0:supplier_id]"
+                "    CLIENT SORTED BY [\"I.0:supplier_id\"]"
                 }});
         return testCases;
     }

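The SortMergeJoinIT expectations above carry SERVER SORTED BY / CLIENT SORTED BY steps on the (now quoted) join keys because the planner is asked for a sort-merge rather than a hash join. A hedged sketch of the query shape, assuming the USE_SORT_MERGE_JOIN hint that drives these plans; table and column names are illustrative only:

    public class SortMergeHintSketch {
        public static void main(String[] args) {
            // The hint requests a sort-merge join, so both sides are
            // sorted on the join key before merging, which is what the
            // SORTED BY steps in the plans above reflect.
            String query =
                "SELECT /*+ USE_SORT_MERGE_JOIN*/ i.name, s.name "
              + "FROM items i JOIN suppliers s "
              + "ON i.\"supplier_id\" = s.\"supplier_id\"";
            System.out.println(query);
        }
    }
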
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
index 85e562c..2d11c5c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
@@ -111,9 +111,9 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
                 "    SKIP-SCAN-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " \\['000000000000001'\\] - \\[\\*\\]\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER SORTED BY [I.NAME]\n" +
@@ -122,28 +122,28 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    PARALLEL SEMI-JOIN TABLE 1(DELAYED EVALUATION) (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 
                 "CLIENT PARALLEL 4-WAY FULL SCAN OVER " + JOIN_COITEM_TABLE_DISPLAY_NAME + "\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id, NAME\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\", NAME\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id, NAME\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\", NAME\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            SKIP-SCAN-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)\n" +
+                "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "    AFTER-JOIN SERVER FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
@@ -151,23 +151,23 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL ANTI-JOIN TABLE 0 (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + "\n" +
                 "    SKIP-SCAN-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[O.customer_id\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY item_id IN \\(O.item_id\\)\n" +
+                "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
                 "            AFTER-JOIN SERVER FILTER BY \\(I.NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)\n" +
-                "    DYNAMIC SERVER FILTER BY customer_id IN \\(\\$\\d+.\\$\\d+\\)"
+                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN \\(\\$\\d+.\\$\\d+\\)"
                 }});
         testCases.add(new String[][] {
                 {
@@ -181,7 +181,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "            SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "    PARALLEL SEMI-JOIN TABLE 1 \\(SKIP MERGE\\)\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " \\['000000000000001'\\] - \\[\\*\\]\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_supplier\n" +
@@ -192,7 +192,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "    PARALLEL SEMI-JOIN TABLE 1(DELAYED EVALUATION) (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 
                 "CLIENT PARALLEL 4-WAY FULL SCAN OVER " + JOIN_COITEM_TABLE_DISPLAY_NAME + "\n" +
@@ -200,20 +200,20 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, item_id\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, item_id\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
                 "    AFTER-JOIN SERVER FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
@@ -221,7 +221,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
                 "    PARALLEL ANTI-JOIN TABLE 0 (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_customer\n" +
@@ -229,13 +229,13 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[O.customer_id\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
                 "            AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)"
                 }});
@@ -253,9 +253,9 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT\n" +
                 "    PARALLEL SEMI-JOIN TABLE 1 \\(SKIP MERGE\\)\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " \\['000000000000001'\\] - \\[\\*\\]\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)",
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)",
                             
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 
@@ -266,7 +266,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT\n" +
                 "    PARALLEL SEMI-JOIN TABLE 1(DELAYED EVALUATION) (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
 
                 "CLIENT PARALLEL 4-WAY FULL SCAN OVER " + JOIN_COITEM_TABLE_DISPLAY_NAME + "\n" +
@@ -274,22 +274,22 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, item_id\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, item_id\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)\n" +
+                "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "    AFTER-JOIN SERVER FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
@@ -297,7 +297,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL ANTI-JOIN TABLE 0 (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "        CLIENT MERGE SORT",
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
@@ -306,17 +306,17 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[O.customer_id\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY item_id IN \\(O.item_id\\)\n" +
+                "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
                 "            AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)\n" +
-                "    DYNAMIC SERVER FILTER BY customer_id IN \\(\\$\\d+.\\$\\d+\\)"
+                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN \\(\\$\\d+.\\$\\d+\\)"
                 }});
         return testCases;
     }

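Unlike the literal expectations in the earlier files, the SubqueryIT strings above are regular expressions: brackets and parentheses are escaped, and \$\d+.\$\d+ stands for generated subquery column names, so the newly added double quotes around identifiers must appear verbatim in the actual plan text. A small standalone sketch of the matching step, assuming the same Pattern.matches idiom this suite uses; the plan line is illustrative:

    import java.util.regex.Pattern;

    public class PlanRegexSketch {
        public static void main(String[] args) {
            // \[ and \] match literal brackets; the double quotes around
            // item_id are plain characters and must match verbatim.
            String expected =
                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]";
            String actual =
                "            SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]";
            System.out.println(Pattern.matches(expected, actual)); // true
        }
    }
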
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
index 82b1c68..7457e02 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
@@ -110,18 +110,18 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "SORT-MERGE-JOIN (SEMI) TABLES\n" +
                 "    SORT-MERGE-JOIN (INNER) TABLES\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER SORTED BY [I.supplier_id]\n" +
+                "            SERVER SORTED BY [\"I.supplier_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER SORTED BY [S.supplier_id]\n" +
+                "            SERVER SORTED BY [\"S.supplier_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [I.item_id]\n" +
+                "    CLIENT SORTED BY [\"I.item_id\"]\n" +
                 "AND (SKIP MERGE)\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " ['000000000000001'] - [*]\n" +
-                "        SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "        SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [item_id]\n" +
+                "    CLIENT SORTED BY [\"item_id\"]\n" +
                 "CLIENT SORTED BY [I.NAME]",
 
                 "SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
@@ -130,40 +130,40 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT\n" +
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id, NAME\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\", NAME\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[item_id, NAME\\]\n" +
+                "        CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id]\\\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"]\\\n" +
                 "                CLIENT MERGE SORT\n" +
                 "    CLIENT SORTED BY \\[.*.CO_ITEM_ID, .*.CO_ITEM_NAME\\]\n" +
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id, NAME\\]\n" +
+                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\", NAME\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[item_id, NAME\\]\n" +
+                "    CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
                 "        SKIP-SCAN-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)\n" +
+                "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "CLIENT FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",            
 
                 "SORT-MERGE-JOIN \\(SEMI\\) TABLES\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + "\n" +
                 "AND \\(SKIP MERGE\\)\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[O.customer_id\\]\n" +
+                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[O.customer_id\\]\n" +
+                "    CLIENT SORTED BY \\[\"O.customer_id\"\\]\n" +
                 "        PARALLEL INNER-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "        PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY item_id IN \\(O.item_id\\)\n" +
+                "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
                 "        AFTER-JOIN SERVER FILTER BY \\(I.NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
         testCases.add(new String[][] {
@@ -175,19 +175,19 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "SORT-MERGE-JOIN (SEMI) TABLES\n" +
                 "    SORT-MERGE-JOIN (INNER) TABLES\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
-                "            SERVER SORTED BY [I.0:supplier_id]\n" +
+                "            SERVER SORTED BY [\"I.0:supplier_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_supplier\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER SORTED BY [S.:supplier_id]\n" +
+                "            SERVER SORTED BY [\"S.:supplier_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [I.:item_id]\n" +
+                "    CLIENT SORTED BY [\"I.:item_id\"]\n" +
                 "AND (SKIP MERGE)\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " ['000000000000001'] - [*]\n" +
-                "        SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "        SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [item_id]\n" +
+                "    CLIENT SORTED BY [\"item_id\"]\n" +
                 "CLIENT SORTED BY [I.0:NAME]",
 
                 "SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
@@ -197,42 +197,42 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, item_id\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[item_id, NAME\\]\n" +
+                "        CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
                 "    CLIENT SORTED BY \\[.*.CO_ITEM_ID, .*.CO_ITEM_NAME\\]\n" +
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, item_id\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[item_id, NAME\\]\n" +
+                "    CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
                 "CLIENT FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "SORT-MERGE-JOIN \\(SEMI\\) TABLES\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_customer\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER SORTED BY \\[Join.idx_customer.:customer_id\\]\n" +
+                "        SERVER SORTED BY \\[\"Join.idx_customer.:customer_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
                 "AND \\(SKIP MERGE\\)\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[O.customer_id\\]\n" +
+                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[O.customer_id\\]\n" +
+                "    CLIENT SORTED BY \\[\"O.customer_id\"\\]\n" +
                 "        PARALLEL INNER-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "        PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
                 "        AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
@@ -245,19 +245,19 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "SORT-MERGE-JOIN (SEMI) TABLES\n" +
                 "    SORT-MERGE-JOIN (INNER) TABLES\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
-                "            SERVER SORTED BY [I.0:supplier_id]\n" +
+                "            SERVER SORTED BY [\"I.0:supplier_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER SORTED BY [S.:supplier_id]\n" +
+                "            SERVER SORTED BY [\"S.:supplier_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [I.:item_id]\n" +
+                "    CLIENT SORTED BY [\"I.:item_id\"]\n" +
                 "AND (SKIP MERGE)\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " ['000000000000001'] - [*]\n" +
-                "        SERVER AGGREGATE INTO DISTINCT ROWS BY [item_id]\n" +
+                "        SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [item_id]\n" +
+                "    CLIENT SORTED BY [\"item_id\"]\n" +
                 "CLIENT SORTED BY [I.0:NAME]",
 
                 "SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
@@ -267,45 +267,45 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, item_id\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[item_id, NAME\\]\n" +
+                "        CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
                 "    CLIENT SORTED BY \\[.*.CO_ITEM_ID, .*.CO_ITEM_NAME\\]\n" +
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, item_id\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[item_id, NAME\\]\n" +
+                "    CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)\n" +
+                "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "CLIENT FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "SORT-MERGE-JOIN \\(SEMI\\) TABLES\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER SORTED BY \\[Join.idx_customer.:customer_id\\]\n" +
+                "        SERVER SORTED BY \\[\"Join.idx_customer.:customer_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
                 "AND \\(SKIP MERGE\\)\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[O.customer_id\\]\n" +
+                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[O.customer_id\\]\n" +
+                "    CLIENT SORTED BY \\[\"O.customer_id\"\\]\n" +
                 "        PARALLEL INNER-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "        PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
+                "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY item_id IN \\(O.item_id\\)\n" +
+                "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
                 "        AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
         return testCases;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 0b06e03..aa26f9b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -245,6 +245,33 @@ public class ViewIT extends BaseViewIT {
     }
     
     @Test
+    public void testReadOnlyViewWithCaseSensitiveColumnNames() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE t (\"k\" INTEGER NOT NULL PRIMARY KEY, \"v1\" INTEGER, \"a\".v2 VARCHAR)";
+        conn.createStatement().execute(ddl);
+        ddl = "CREATE VIEW v (v VARCHAR) AS SELECT * FROM t WHERE \"k\" > 5 and \"v1\" > 1";
+        conn.createStatement().execute(ddl);
+        try {
+            conn.createStatement().execute("UPSERT INTO v VALUES(1)");
+            fail();
+        } catch (ReadOnlyTableException e) {
+            
+        }
+        for (int i = 0; i < 10; i++) {
+            conn.createStatement().execute("UPSERT INTO t VALUES(" + i + ", " + (i+10) + ",'A')");
+        }
+        conn.commit();
+        
+        int count = 0;
+        ResultSet rs = conn.createStatement().executeQuery("SELECT \"k\", \"v1\",\"a\".v2 FROM v");
+        while (rs.next()) {
+            count++;
+            assertEquals(count + 5, rs.getInt(1));
+        }
+        assertEquals(4, count);
+    }
+    
+    @Test
     public void testViewAndTableInDifferentSchemas() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
         String ddl = "CREATE TABLE s1.t (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)";

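The new ViewIT test above exercises Phoenix's identifier rules: unquoted names are normalized to upper case, while double-quoted names such as "k" and "v1" stay case sensitive, both in the view's WHERE clause and in the final SELECT. A minimal standalone sketch of that rule; the connection URL and table name are illustrative:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class CaseSensitiveColumnsSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE demo (\"k\" INTEGER NOT NULL PRIMARY KEY, "
                        + "\"v1\" INTEGER)");
                // Quoted references keep their case and resolve against "k", "v1":
                stmt.executeQuery("SELECT \"k\", \"v1\" FROM demo");
                // An unquoted reference would normalize to K and fail to resolve:
                // stmt.executeQuery("SELECT k FROM demo");
            }
        }
    }
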
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
index f526c6a..e54e6a2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
@@ -113,7 +113,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\['a'\\]\n" +
                     "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(T_ID, K1, K2\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             String actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -138,7 +138,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\['a'\\]\n" +
                     "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(T_ID, K1, K2\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -168,7 +168,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\[\\*\\] - \\['z'\\]\n" +
                     "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(T_ID, K1, K2\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -203,7 +203,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                             "    SKIP-SCAN-JOIN TABLE 0\n" +
                             "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\[\\*\\] - \\['z'\\]\n" +
                             "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                            "    DYNAMIC SERVER FILTER BY \\(T_ID, K1, K2\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                            "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -237,7 +237,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                             "    SKIP-SCAN-JOIN TABLE 0\n" +
                             "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER I \\[\\*\\] - \\['z'\\]\n" +
                             "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                            "    DYNAMIC SERVER FILTER BY \\(T_ID, K1, K2\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                            "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -275,8 +275,8 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                             "    SKIP-SCAN-JOIN TABLE 0\n" +
                             "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER I \\['tid1','a'\\]\n" +
                             "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                            "    DYNAMIC SERVER FILTER BY \\(K1, K2\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
-            assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
+                            "    DYNAMIC SERVER FILTER BY \\(\"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+            assertTrue("Expected:\n" + expected + "\ndid not match\n" + actual, Pattern.matches(expected, actual));
             
             rs = conn1.createStatement().executeQuery(query);
             assertTrue(rs.next());
@@ -323,8 +323,8 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER _IDX_T \\[-32768,1\\] - \\[-32768,2\\]\n" +
                     "            SERVER FILTER BY FIRST KEY ONLY AND K2 IN \\(3,4\\)\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(T_ID, K1, K2\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
-            assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected,actual));
+                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+            assertTrue("Expected:\n" + expected + "\ndid not match\n" + actual, Pattern.matches(expected,actual));
             
             rs = conn1.createStatement().executeQuery(query);
             assertTrue(rs.next());

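A note on the explain-plan assertions above: with this change the optimizer quotes column names in the plan output, so the expected strings must escape the regex metacharacters and, for the Java string literal, the embedded double quotes. A minimal standalone sketch of the matching, using a made-up plan line rather than real cluster output:

    import java.util.regex.Pattern;

    public class QuotedPlanMatch {
        public static void main(String[] args) {
            // In Java source every regex backslash is doubled, and the
            // quotes around identifiers are escaped for the string literal.
            String expected = "DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
            String actual = "DYNAMIC SERVER FILTER BY (\"T_ID\", \"K1\") IN (($1.$2, $1.$3))";
            System.out.println(Pattern.matches(expected, actual)); // true
        }
    }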
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 32216db..95e145c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -387,7 +387,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
         if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(column)) { // project only kv columns
             context.getScan().addColumn(column.getFamilyName().getBytes(), column.getName().getBytes());
         }
-        Expression expression = ref.newColumnExpression();
+        Expression expression = ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive());
         Expression wrappedExpression = wrapGroupByExpression(expression);
         // If we're in an aggregate expression
         // and we're not in the context of an aggregate function

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
index 95ef6a4..19dbc68 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java
@@ -85,4 +85,8 @@ public class ColumnParseNode extends NamedParseNode {
         ColumnParseNode other = (ColumnParseNode)obj;
         return fullName.equals(other.fullName);
     }
+    
+    public boolean isTableNameCaseSensitive() {
+        return tableName == null ? false : tableName.isTableNameCaseSensitive();
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
index 9f43371..9717067 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
@@ -23,20 +23,30 @@ import org.apache.phoenix.util.SchemaUtil;
 public class TableName {
     private final String tableName;
     private final String schemaName;
+    private final boolean isTableNameCaseSensitive;
+    private final boolean isSchemaNameCaseSensitive;
     
     public static TableName createNormalized(String schemaName, String tableName) {
-        schemaName = schemaName == null ? null : SchemaUtil.normalizeIdentifier(schemaName);
-        tableName = SchemaUtil.normalizeIdentifier(tableName);
-        return new TableName(schemaName, tableName);
+        return new TableName(schemaName, tableName, true);
     }
     
     public static TableName create(String schemaName, String tableName) {
-        return new TableName(schemaName,tableName);
+        return new TableName(schemaName, tableName, false);
     }
     
-    private TableName(String schemaName, String tableName) {
-        this.schemaName = schemaName;
-        this.tableName = tableName;
+    private TableName(String schemaName, String tableName, boolean normalize) {
+        this.schemaName = normalize ? SchemaUtil.normalizeIdentifier(schemaName) : schemaName;
+        this.isSchemaNameCaseSensitive = normalize ? SchemaUtil.isCaseSensitive(schemaName) : false;
+        this.tableName = normalize ? SchemaUtil.normalizeIdentifier(tableName) : tableName;
+        this.isTableNameCaseSensitive = normalize ? SchemaUtil.isCaseSensitive(tableName) : false;
+    }
+    
+    public boolean isTableNameCaseSensitive() {
+        return isTableNameCaseSensitive;
+    }
+
+    public boolean isSchemaNameCaseSensitive() {
+        return isSchemaNameCaseSensitive;
     }
 
     public String getTableName() {

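The normalize-on-construction logic above is what lets TableName remember whether an identifier was quoted. A minimal sketch of the intended semantics, assuming SchemaUtil.normalizeIdentifier and SchemaUtil.isCaseSensitive behave as elsewhere in Phoenix (unquoted identifiers fold to upper case; quoted ones keep their case and lose the quotes):

    import org.apache.phoenix.util.SchemaUtil;

    public class NormalizeDemo {
        public static void main(String[] args) {
            // Unquoted: folded to upper case, not case sensitive.
            System.out.println(SchemaUtil.normalizeIdentifier("myTable"));     // MYTABLE
            System.out.println(SchemaUtil.isCaseSensitive("myTable"));         // false
            // Quoted: quotes stripped, original case preserved.
            System.out.println(SchemaUtil.normalizeIdentifier("\"myTable\"")); // myTable
            System.out.println(SchemaUtil.isCaseSensitive("\"myTable\""));     // true
        }
    }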
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
index f271ac5..c6dd1f4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
@@ -89,11 +89,15 @@ public class ColumnRef {
         if (!tableRef.equals(other.tableRef)) return false;
         return true;
     }
-
+    
     public ColumnExpression newColumnExpression() {
+        return newColumnExpression(false, false);
+    }
+
+    public ColumnExpression newColumnExpression(boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
         PTable table = tableRef.getTable();
         PColumn column = this.getColumn();
-        String displayName = tableRef.getColumnDisplayName(this);
+        String displayName = tableRef.getColumnDisplayName(this, schemaNameCaseSensitive, colNameCaseSensitive);
         if (SchemaUtil.isPKColumn(column)) {
             return new RowKeyColumnExpression(
                     column, 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
index 362d59f..62ef431 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
@@ -61,7 +61,7 @@ public class LocalIndexDataColumnRef extends ColumnRef {
     }
 
     @Override
-    public ColumnExpression newColumnExpression() {
+    public ColumnExpression newColumnExpression(boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
         PTable table = this.getTable();
         PColumn column = this.getColumn();
         // TODO: util for this or store in member variable

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
index a88ba4d..b64912b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -18,7 +18,6 @@
 package org.apache.phoenix.schema;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -69,31 +68,29 @@ public class TableRef {
         return alias;
     }
 
-    public String getColumnDisplayName(ColumnRef ref) {
+    public String getColumnDisplayName(ColumnRef ref, boolean cfCaseSensitive, boolean cqCaseSensitive) {
+        String cf = null;
+        String cq = null;       
         PColumn column = ref.getColumn();
+        String name = column.getName().getString();
+        boolean isIndex = table.getType() == PTableType.INDEX;
         if (table.getType() == PTableType.JOIN || table.getType() == PTableType.SUBQUERY) {
-            return column.getName().getString();
+            cq = column.getName().getString();
         }
-        boolean isIndex = table.getType() == PTableType.INDEX;
-        if (SchemaUtil.isPKColumn(column)) {
-            String name = column.getName().getString();
-            if (isIndex) {
-                return IndexUtil.getDataColumnName(name);
-            }
-            return name;
+        else if (SchemaUtil.isPKColumn(column)) {
+            cq = isIndex ? IndexUtil.getDataColumnName(name) : name;
         }
-        
-        if (isIndex) {
-            // Translate to the data table column name
-            String indexColumnName = column.getName().getString();
-            String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColumnName);
-            String dataColumnName = IndexUtil.getDataColumnName(indexColumnName);
+        else {
             String defaultFamilyName = table.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : table.getDefaultFamilyName().getString();
-            return SchemaUtil.getColumnDisplayName(defaultFamilyName.equals(dataFamilyName) ? null : dataFamilyName, dataColumnName);
+            // Translate to the data table column name
+            String dataFamilyName = isIndex ? IndexUtil.getDataColumnFamilyName(name) : column.getFamilyName().getString() ;
+            cf = defaultFamilyName.equals(dataFamilyName) ? null : dataFamilyName;
+            cq = isIndex ? IndexUtil.getDataColumnName(name) : name;
         }
-        byte[] defaultFamily = table.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : table.getDefaultFamilyName().getBytes();
-        String displayName = SchemaUtil.getColumnDisplayName(Bytes.compareTo(defaultFamily, column.getFamilyName().getBytes()) == 0  ? null : column.getFamilyName().getBytes(), column.getName().getBytes());
-        return displayName;
+        
+        cf = (cf!=null && cfCaseSensitive) ? "\"" + cf + "\"" : cf;
+        cq = cqCaseSensitive ? "\"" + cq + "\"" : cq;
+        return SchemaUtil.getColumnDisplayName(cf, cq);
     }
     
     @Override

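The rewritten getColumnDisplayName reduces to three steps: drop the column family when it matches the table's default family, translate index columns back to their data-table names, and quote whichever parts were case sensitive in the query. A small sketch of the final combination step, with hypothetical values and assuming SchemaUtil.getColumnDisplayName joins a non-null family and qualifier with a dot:

    import org.apache.phoenix.util.SchemaUtil;

    public class DisplayNameDemo {
        public static void main(String[] args) {
            String cf = "a";   // non-default family, case sensitive in the query
            String cq = "v2";  // qualifier, not case sensitive
            cf = "\"" + cf + "\"";  // quote only the case-sensitive part
            System.out.println(SchemaUtil.getColumnDisplayName(cf, cq)); // "a".v2
        }
    }

This is the shape the view test earlier in this patch expects when it selects "a".v2.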
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 8245df5..39e13bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -28,7 +28,6 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
@@ -442,7 +441,7 @@ public class IndexUtil {
         PhoenixStatement statement = new PhoenixStatement(conn);
         TableRef indexTableRef = new TableRef(index) {
             @Override
-            public String getColumnDisplayName(ColumnRef ref) {
+            public String getColumnDisplayName(ColumnRef ref, boolean cfCaseSensitive, boolean cqCaseSensitive) {
                 return '"' + ref.getColumn().getName().getString() + '"';
             }
         };


[46/50] [abbrv] phoenix git commit: PHOENIX-1690 IndexOutOfBoundsException during SkipScanFilter intersect

Posted by ma...@apache.org.
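For context: intersecting a skip scan with a region boundary means clipping each slot's sorted list of key ranges to [lowerInclusive, upperExclusive), and the fix below hardens the position arithmetic that walks those lists. A self-contained sketch of the operation using plain int ranges instead of Phoenix's byte[] slots (an illustration of the idea, not the actual SkipScanFilter code):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class RangeIntersect {
        // Each range is [start, end); the list is sorted and non-overlapping.
        static List<int[]> intersect(List<int[]> ranges, int lower, int upper) {
            List<int[]> out = new ArrayList<int[]>();
            for (int[] r : ranges) {
                int lo = Math.max(r[0], lower);
                int hi = Math.min(r[1], upper);
                if (lo < hi) {
                    out.add(new int[] { lo, hi });
                }
            }
            // An empty result is legitimate: the filter simply matches
            // nothing in this region. Callers that index into the result
            // without checking are the kind of bug that surfaces as an
            // IndexOutOfBoundsException.
            return out;
        }

        public static void main(String[] args) {
            List<int[]> ranges = Arrays.asList(new int[] { 1, 5 }, new int[] { 8, 12 });
            System.out.println(intersect(ranges, 6, 7).size()); // 0
        }
    }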
http://git-wip-us.apache.org/repos/asf/phoenix/blob/49f06b33/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanBigFilterTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanBigFilterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanBigFilterTest.java
new file mode 100644
index 0000000..29e14bf
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanBigFilterTest.java
@@ -0,0 +1,717 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Properties;
+import java.util.SortedMap;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.end2end.Shadower;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.BaseConnectionlessQueryTest;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.stats.GuidePostsInfo;
+import org.apache.phoenix.schema.stats.PTableStats;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+
+
+public class SkipScanBigFilterTest extends BaseConnectionlessQueryTest {
+    private static final byte[][] REGION_BOUNDARIES_MINIMAL = {
+        Bytes.toBytesBinary("\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        };
+
+    private static final byte[][] GUIDE_POSTS_MINIMAL = {
+        Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x07#[j\\x80\\x00\\x00\\x00Y\\x08u\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD3U\\x88\\xFF\\x80\\x00\\x00\\x00\\x84\\xBFJ\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+    };
+
+    
+    private static final byte[][] REGION_BOUNDARIES_ALL = {
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D^\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x01y3\\xF7P\\x80\\x00\\x00\\x00B\\xE7\\xF6F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00oI\\x17B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xFA._\\xE2\\x80\\x00\\x00\\x00\\x98\\xFE2\\xF5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Da\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`1%"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xB4]\\xE7\\x80\\x00\\x00\\x00ER\\xFE#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D]\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01p5R\\xD0\\x80\\x00\\x00\\x00@W\\xCC\\x12\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xC7U\\x80\\x00\\x00\\x00h\\xDF\"\\xBC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC5\\x8E\\xB0\\x80\\x00\\x00\\x00yM\\xD7\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x028\\xCA\\x85\\xFB\\x80\\x00\\x00\\x00}\\xA3*\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D^\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01y\\x17\\x8B<\\x80\\x00\\x00\\x00i'\\xE8\\xC4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00oK\\x11_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D]\\x80\\x07\\x15\\x12MOBILE\\x00\\x80\\x00\\x00\\x01a\\x02js\\x80\\x00\\x00\\x00@Y\\xC7\\x0C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0E\\x04@\\x8C\\x80\\x00\\x00\\x00o>\\xB1\\x1E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        };
+
+    private static final byte[][] GUIDE_POSTS_ALL = {
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01!y\\xC3\\x80\\x80\\x00\\x00\\x00+\\xB0)u\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x90h\\xE8;\\x80\\x00\\x00\\x00\\x0E\\x9B\\xE7x\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x14'_\\xF5\\x80\\x00\\x00\\x00(\\xF9\\xDD\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+\\xF1\\xD8d\\x80\\x00\\x00\\x00\\x9B\\xC2A\\xD0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&9JM\\x80\\x00\\x00\\x00w\\x1A\\xF5\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xAAKT\\x80\\x00\\x00\\x00w\\x98{@\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&yD\\x10\\x80\\x00\\x00\\x00w'f\\x04\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x08\\x01\\xA1\\x80\\x00\\x00\\x00w\\x17W\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&%U\\x1B\\x80\\x00\\x00\\x00w\\x19u\\x1C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDBV\\x5C\\x80\\x00\\x00\\x00\\x14\\xE5\\xA4\\xCF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\xF2\\xE3\\xA1\\xD8\\x80\\x00\\x00\\x00\\x02\\x9DY\\x88\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xBA\\xDC\\xEF\\x80\\x00\\x00\\x00\\x99l\\x0D\\xD2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&h\\xC6\\x0C\\x80\\x00\\x00\\x00w\"\\xDE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xA516\\x80\\x00\\x00\\x00EL\\xE1\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`1!\\xA1\\x80\\x00\\x00\\x00;\\xF4\\x8B\\xD4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`~.@\\x80\\x00\\x00\\x00<\\x03\\x85\\xA9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cc\\xAF\\x98\\x80\\x00\\x00\\x00o\\x17\\xB9\\x82\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x80)\\xB7\\x80\\x00\\x00\\x00fo5]\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y3a\\x7F\\x80\\x00\\x00\\x00X\\xC7\\xE3\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x81\\x0Bb\\x80\\x00\\x00\\x00<\\x04s\\xA9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x04\\x7F1DESKTOP\\x00\\x80\\x00\\x00\\x02\\x026U\\x05\\x80\\x00\\x00\\x00kF\\x16(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xF1+\\x80\\x00\\x00\\x00~J\\x87\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Jg&\\xF4\\x80\\x00\\x00\\x00o\\x10\\xC8\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p5BN\\x80\\x00\\x00\\x00i\\x0El]\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC8t\\x80\\x00\\x00\\x00<\\x0C\\x10\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xD1\\xBE7\\x80\\x00\\x00\\x00\\x8A\\xFA_\\xDC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xD7\\xEE\\x19\\x80\\x00\\x00\\x00\\x89\\xEC\\xB4\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p3Ja\\x80\\x00\\x00\\x00tM{\\xBA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9F#n\\x80\\x00\\x00\\x00i\\xC9f\\xB2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01a\\x02b\\xAA\\x80\\x00\\x00\\x00h\\xDF9\\xDA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE3!\\xC8\\x80\\x00\\x00\\x00\\x89\\xFD\\x1D\\xBB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x99=\\x9F\\x80\\x00\\x00\\x00i\\xC2\\x9D\\x98\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDD\\xDC\\xB5\\x80\\x00\\x00\\x00\\x89\\xE5q=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAW\\xCB\\x80\\x00\\x00\\x00\\x89\\xE89\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02y\\xD1d+\\x80\\x00\\x00\\x00o\\x18\\xC7,\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x7F&\\x16\\x80\\x00\\x00\\x00<\\x03\\xE5l\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE2\\xAE\\x1E\\x80\\x00\\x00\\x00\\x89\\xFA\\xC8\\xED\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6}\\xD7\\x80\\x00\\x00\\x00E^\\x83\\x8F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xCC\\x84\\x80\\x00\\x00\\x00o\\x1A\\xC6\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\x1DN#\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF5\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x19\\x80\\x05\\xD8\\xC7TABLET\\x00\\x80\\x00\\x00\\x02Ar2q\\x80\\x00\\x00\\x00\\x98\\x9BF|\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00w\\x9C\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x86.\\xF0\\xF4\\x80\\x00\\x00\\x00\\x98\\x9B`1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x91\\xD3w\\xB3\\x80\\x00\\x00\\x00\\xA00\\x5C\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x13D^:\\x80\\x00\\x00\\x00p\\x8F\\xA6\\x83\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x13\\x0Fq\\xA5\\x80\\x00\\x00\\x00p\\x84w\\x8B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14a\\xB2\\xE6\\x80\\x00\\x00\\x00q\\x09\\x83\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x170f\\xF3\\x80\\x00\\x00\\x00q\\xD4u(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14\\x83\\x88l\\x80\\x00\\x00\\x00q\\x11\\xAB\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BZ\\xE7\\x9E\\x80\\x00\\x00\\x00s~\\xF8\\x14\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&~\\xE2\\xAB\\x80\\x00\\x00\\x00w(\\xD2N\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x91W\\xCD\\xBE\\x80\\x00\\x00\\x00\\x0E\\xAD\\x0A~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&4\\xE0\\x1A\\x80\\x00\\x00\\x00w\\x1A\\xA6\\x99\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+\\xF5\\xF1\\xDD\\x80\\x00\\x00\\x00\\x99m6q\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&<\\xD6\\xB9\\x80\\x00\\x00\\x00w\\x1B4-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA7\\xA4\\xED\\x80\\x00\\x00\\x00w\\x97Lb\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&q\"]\\x80\\x00\\x00\\x00w$\\xD6\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x05y\\xEF\\x80\\x00\\x00\\x00w\\x17\\x19c\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\x8A\\xBB\\xB1\\xDE\\x80\\x00\\x00\\x00\\x0EAU\\xE5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&fS\\xE3\\x80\\x00\\x00\\x00w\"\\x8C\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD5\\x99\\xF6\\x80\\x00\\x00\\x00\\x15,E\\xC7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&g.\\x93\\x80\\x00\\x00\\x00w\"\\xAA\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&d\\x15?\\x80\\x00\\x00\\x00w\")\\xAF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E>\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+ \\x16\\x19\\x80\\x00\\x00\\x00x\\x95\\xE4\\x18\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00\\xBB\\x92\\xA1\\x96\\x80\\x00\\x00\\x00\\x14J\\xAEd\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xF2MOBILE\\x00\\x80\\x00\\x00\\x02n\\x95\\xD0N\\x80\\x00\\x00\\x00\\x92\\x0DF\\xA4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00$\\x0F\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xE1]\\x1D\\x80\\x80\\x00\\x00\\x00}\\x1A\\xA8e\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00$[\\x80\\x00\\x0D\\xDB\\x80\\x058\\x0BDESKTOP\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00n\\xE5\\xBF\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00$[\\x80\\x00\\x0EJ\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x8A\\xC2\\x9F\\xFA\\x80\\x00\\x00\\x00\\x98c\\xD3D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0D\\xDE\\x80\\x01i\\xF6DESKTOP\\x00\\x80\\x00\\x00\\x00\\xFEJ\\xDA\\x83\\x80\\x00\\x00\\x00\\x8D\\x8A\\xD1\\xA1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01s\\xAE\\x95\\xC7\\x80\\x00\\x00\\x00\\x86\\x04\\xF7[\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x06k\\xCB\\x13\\x80\\x00\\x00\\x00\\x84\\xC0N\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E*\\x80\\x01i\\xF5TABLET\\x00\\x80\\x00\\x00\\x01s\\xAE\\x98\\xF9\\x80\\x00\\x00\\x00\\x9A\\xD4\\xF0\\xED\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xC2fi\\x16\\x80\\x00\\x00\\x00\\x97\\xE1:Z\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E?\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01s\\xAEE\\x94\\x80\\x00\\x00\\x00\\x98\\xF4j\\x0A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0EL\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x002H\\x8C\\xF7\\x80\\x00\\x00\\x00\\x88\\xF6\\xC3F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x5C\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01s\\xAD\\xDE1\\x80\\x00\\x00\\x00\\x9F\\xE1`\\x02\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0D\\xDF\\x80\\x01i\\xECOTHER\\x00\\x80\\x00\\x00\\x01\\xB7@\\x9C\\x89\\x80\\x00\\x00\\x00V\\x81\\x8E\\xC8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E\\x08\\x80\\x01i\\xECOTHER\\x00\\x80\\x00\\x00\\x02\\x00!\\x9F\\xF3\\x80\\x00\\x00\\x00]9N\\x91\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E\"\\x80\\x01i\\xEDOTHER\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x98\\xD1]\\x09\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x91\\xA6<J\\x80\\x00\\x00\\x00\\x98\\xD0\\xF5Y\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E9\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02\\x00)T\\x8C\\x80\\x00\\x00\\x00\\x9E[\\xB5I\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0EI\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x91\\xA7\\xD7C\\x80\\x00\\x00\\x00\\x9F\\xAA\\x0E\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01\\xB7@\\x9Ee\\x80\\x00\\x00\\x00\\x9F\\xA9h\\xA7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x00\\x95 o\\x5C\\x80\\x00\\x00\\x00\\xA3$t\\xEE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00=\\x96\\x80\\x00\\x0E\\x1E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xF3\\xA9[>\\x80\\x00\\x00\\x00f\\x1A\\xB1\\xF7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0Dh\\x80\\x02p DESKTOP\\x00\\x80\\x00\\x00\\x01g\\x8B\\x81#\\x80\\x00\\x00\\x00?J\\xDC\\xA4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02p\\xE4 ~\\x80\\x00\\x00\\x00\\x8F\\x05\\xDA\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E+\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01o(\\x97\\xCA\\x80\\x00\\x00\\x00V\\xDF\\xC8\\x81\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E3\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01Ud\\x1D\\xF2\\x80\\x00\\x00\\x00V\\xE0\\x95\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01I#\"6\\x80\\x00\\x00\\x00>\\x1E\\xDF\\x87\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0EC\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01$:\\x1BG\\x80\\x00\\x00\\x00V\\xDE\\xFD\\xBB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0EJ\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01KpC\\x07\\x80\\x00\\x00\\x006\\xCE}5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0EP\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xC0\\xDC<\\x02\\x80\\x00\\x00\\x00X\\xAB\\xC6\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0EV\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xC3B\\x82\\xA5\\x80\\x00\\x00\\x00\\x90\\x1B\\x8F-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E]\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC0\\xDCBU\\x80\\x00\\x00\\x00\\x93K\\x86\\xA3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01$\\xAE\\xD2\\x0A\\x80\\x00\\x00\\x00?J\\xDD\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x5C\\xBB\\x80\\x00\\x0D\\xFA\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02z*\\xDE2\\x80\\x00\\x00\\x00\\x92\\xFF\\xEEp\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dj\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x02.\\xD21\\x80\\x00\\x00\\x00kF\\x15b\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CIZ\\xF4\\x80\\x00\\x00\\x00o\\x1D<&\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x07\\x15\\x08OTHER\\x00\\x80\\x00\\x00\\x028\\xCDo\\xC5\\x80\\x00\\x00\\x00}\\xA3\\x88\\x7F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x0Cb\\xDFn\\x80\\x00\\x00\\x00~J\\xA7n\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x87\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01y\\x18K\\x92\\x80\\x00\\x00\\x00B\\xEE\\xF2?\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x8F\"Z\\x80\\x00\\x00\\x00<\\x06\\xDB!\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x90B\\x12\\x80\\x00\\x00\\x00<\\x07(@\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD6\\x95\\xB6\\x80\\x00\\x00\\x00fo\\x84\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x903r\\x80\\x00\\x00\\x00i\\x0Ep\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01` \\xF0i\\x80\\x00\\x00\\x00;\\xF2=\\xBF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x9DRS\\x80\\x00\\x00\\x00i\\xC2c\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\x1DM\\xD8\\x80\\x00\\x00\\x00\\x8A\\x1Ak\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\"\\x12\\x9A\\x80\\x00\\x00\\x00\\x8A\\x1B\\xD0P\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDDQ\\xAD\\x80\\x00\\x00\\x00\\x89\\xDF\\x8D!\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y ,\\xDC\\x80\\x00\\x00\\x00\\x8A\\x1A\\x81\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x87\\x01\\x14\\x80\\x00\\x00\\x00fp<\\xDD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD8\\xD6j\\x80\\x00\\x00\\x00\\x89\\xF6#\\x19\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x09\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE4\\x04\\xE6\\x80\\x00\\x00\\x00\\x89\\xFA\\xC8\\xDB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE3\\x06\\xBC\\x80\\x00\\x00\\x00\\x8A\\x09p<\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\xED8\\xD8\\x80\\x00\\x00\\x00h\\xDE\\xD82\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x07\\x15\\x04DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC5\\x91\\xBF\\x80\\x00\\x00\\x00E]\\x98\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E-\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAb\\x0A\\x80\\x00\\x00\\x00\\x89\\xDD\\xE2\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0C.z\\xDF\\x80\\x00\\x00\\x00o\\x1B\\xC6#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0ED\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YL\\xB0\\xE1\\x80\\x00\\x00\\x00\\x8A\\xB9X\\xF7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`|\\x1C\\xD0\\x80\\x00\\x00\\x00<\\x03J_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0C?\\xA2#\\x80\\x00\\x00\\x00oKT#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01_\\xDA\\xD6\\xF1\\x80\\x00\\x00\\x00;\\xF2\\x08f\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5C\\xB1-\\x83\\x80\\x00\\x00\\x00\\x8Ap74\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00w\\x9C\\x80\\x00\\x0E=\\x80\\x05\\xD8\\xD3DESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xC1\\x80\\x00\\x00\\x00\\x9EHL\\x10\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00w\\xD8\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xBCZiE\\x80\\x00\\x00\\x00u\\xD7\\xC8\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BtA\\xFE\\x80\\x00\\x00\\x00s\\x83\\xB5\\xC8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!'\\xA4\\x13\\x80\\x00\\x00\\x00u\\x9E\\xD7l\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14}~v\\x80\\x00\\x00\\x00q\\x10\\xE6\\xE9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x13\\x0AU\\xF5\\x80\\x00\\x00\\x00p\\x840\\x85\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E&\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x17%\\x96\\x86\\x80\\x00\\x00\\x00q\\xD1v\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14\\x86\\xC2\"\\x80\\x00\\x00\\x00q\\x12<\\x81\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5CY;{\\x80\\x00\\x00\\x00\\x8A-\\x0A/\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x14'l\\xA2\\x80\\x00\\x00\\x00(\\xF9\\xF1{\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xFCmU\\x80\\x00\\x00\\x00\\x02\\x93l\\x11\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02,c\\xDE+\\x80\\x00\\x00\\x00\\x9B\\xC2e8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&]\\x9E\\x94\\x80\\x00\\x00\\x00w \\x90\\xA1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA8\\x91\\xD7\\x80\\x00\\x00\\x00w\\x97\\x8D>\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x80/\\xCD\\x80\\x00\\x00\\x00w)4\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&]\\xA5\\xFC\\x80\\x00\\x00\\x00w \\xB2\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x18\\xCF\\x81\\x80\\x00\\x00\\x00w\\x18\\xBE\\xD7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDB>B\\x80\\x00\\x00\\x00\\x14\\xE5\\xA4\\x91\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x04\\xDF\\x9B\\x80\\x00\\x00\\x00\\x99k\\xF2\\xF6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC3\\x95*\\x80\\x00\\x00\\x00x\\x95\\xD5\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+!#!\\x80\\x00\\x00\\x00x\\x95\\xE5\\x06\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E>\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02(\\xA1\\x873\\x80\\x00\\x00\\x00yf\\x12D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02}r[`\\x80\\x00\\x00\\x00\\x92\\xBA\\xF0\\xB7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xF2DESKTOP\\x00\\x80\\x00\\x00\\x012\\xCD{\\xD9\\x80\\x00\\x00\\x000[\\xA1u\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e]\\x08x\\x80\\x00\\x00\\x00=\\x92\\xF2-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x02DTk\\x80\\x00\\x00\\x00kK\\x88r\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cc\\x9Dd\\x80\\x00\\x00\\x00~J\\xAC\\xFC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CWE\\xAC\\x80\\x00\\x00\\x00o\\x10\\xB7\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y\\xA6\\xE9n\\x80\\x00\\x00\\x00X\\xCC\\xAF*\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x9DU\\x19\\x80\\x00\\x00\\x00i\\xC2^S\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x07\\x15\\x05OTHER\\x00\\x80\\x00\\x00\\x01\\x84&\\x91\\x1F\\x80\\x00\\x00\\x00O\\xF6\\xC7\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82f\\x02O\\x80\\x00\\x00\\x00E?\\xAF\\xAB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\x96)[\\x80\\x00\\x00\\x00EE\\xF5\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5CY,O\\x80\\x00\\x00\\x00\\x8A-\\x09\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x90m\\xAB\\xC6\\x80\\x00\\x00\\x00\\x0E\\x9B\\xE9X\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xFF\\xBA\\xA2\\x80\\x00\\x00\\x00\\x02\\x93F\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02,g\\x89+\\x80\\x00\\x00\\x00\\x9B\\xC2n'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&`\\xABX\\x80\\x00\\x00\\x00w!\\x98m\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02,f\\xF9\\x7F\\x80\\x00\\x00\\x00yL\\x83_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+\\xE2\\xC9\\xE6\\x80\\x00\\x00\\x00yL5\\x18\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x08\\x01\\x96\\x80\\x00\\x00\\x00w\\x17W\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&Q\\x143\\x80\\x00\\x00\\x00w\\x1E\\x0F\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD6\\x15\\xCB\\x80\\x00\\x00\\x00\\x0C\\xDB\\x92\\xFE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xBE\\x83 9\\x80\\x00\\x00\\x00\\x0C\\xDB\\x93\\x80\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC4\\xD3\\xA7\\x80\\x00\\x00\\x00x\\x95\\xD6+\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x03t\\xC5OTHER\\x00\\x80\\x00\\x00\\x01p+I\\xE4\\x80\\x00\\x00\\x00h\\xDE\\xDA\\xF6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`{\\x1D\\xF9\\x80\\x00\\x00\\x00<\\x03\\x17}\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x82\\xC7\\x85\\x80\\x00\\x00\\x00i\\xA97#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CbU\\xD0\\x80\\x00\\x00\\x00~J\\xA3\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x0CXM\\x87\\x80\\x00\\x00\\x00o\\x11TY\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01p7\\xDC\\x83\\x80\\x00\\x00\\x00@\\x5C`\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`{%\\x14\\x80\\x00\\x00\\x00<\\x03\\x1F\\x87\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xC0\\xDE\\x1E\\x80\\x00\\x00\\x00EY\\xB2\\xF4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6\\x07\\xE8\\x80\\x00\\x00\\x00O\\xF6\\xE8\\xA6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC8\\x9F\\x80\\x00\\x00\\x00<\\x0C\\x08\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFF\\xB6/\\x80\\x00\\x00\\x00h\\xDF\"\\xEB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC9O\\x80\\x00\\x00\\x00<\\x0C\\x13\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xBFT*\\x80\\x00\\x00\\x00\\x8A\\xFAA\\x8C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+h\\xBD\\xBF\\x80\\x00\\x00\\x00y\"N\\x97\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p4:\\xDA\\x80\\x00\\x00\\x00u\\xAE\\x95q\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9F*\\xA8\\x80\\x00\\x00\\x00i\\xC9F\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e]\\x0D\\xAF\\x80\\x00\\x00\\x00=\\x94\\xC4)\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE02\\xE6\\x80\\x00\\x00\\x00\\x8A\\x0F6(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFF\\xD19\\x80\\x00\\x00\\x00h\\xDE\\x9Df\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDB\\xB2\\xB5\\x80\\x00\\x00\\x00\\x89\\xE36\\xDB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAW\\xC3\\x80\\x00\\x00\\x00\\x89\\xE13\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02`u&J\\x80\\x00\\x00\\x00\\x8B\\xB7)\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02y\\x8B\\xDE\\x0D\\x80\\x00\\x00\\x00o\\x11@\\x1C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\x1E\\x9C1\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF6?\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01e]\\xF8q\\x80\\x00\\x00\\x00i\\x0E\\xBE\\xD0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\xC7\\xE8\\xB1\\x80\\x00\\x00\\x00o\\x12\\x1B\\xA3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01h\\x13I\\x9E\\x80\\x00\\x00\\x00i\\x0D\\xD6\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x18\\x80\\x08]LDESKTOP\\x00\\x80\\x00\\x00\\x02m\\x07\\x90\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00w\\x9C\\x80\\x00\\x0E:\\x80\\x08]LDESKTOP\\x00\\x80\\x00\\x00\\x02\\x86/\\xEE\\x1C\\x80\\x00\\x00\\x00\\x98\\x9B\\x5C\\x88\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02E\\xEC@\\x05\\x80\\x00\\x00\\x00Y\\x12\\xC4\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!\\x18T\\xAC\\x80\\x00\\x00\\x00u\\x9C\\xDA\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14`\\x9C6\\x80\\x00\\x00\\x00q\\x09Hp\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x05\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x1B\\x14o\"\\x80\\x00\\x00\\x00sZ\\xFFN\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15\\x82t{\\x80\\x00\\x00\\x00qRS\\x1B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x16\\xBC\\x8C|\\x80\\x00\\x00\\x00q\\xAA\\x14\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x17\\x02O\\xE9\\x80\\x00\\x00\\x00q\\xC9E\\x10\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01y41[\\x80\\x00\\x00\\x00B\\xE7\\xEDE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x01y6&\\x83\\x80\\x00\\x00\\x00B\\xE8\\x96\\xCB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`|\\x15<\\x80\\x00\\x00\\x00<\\x03D\\x13\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\x8C\\xE1\\x80\\x00\\x00\\x00o\\x1C(\\xA7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x82\\x8E'\\x99\\x80\\x00\\x00\\x00b\\xF3\\xABM\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01a\\x01\\x1E\\xB7\\x80\\x00\\x00\\x00h\\xDE\\x93\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x07\\x15\\x08OTHER\\x00\\x80\\x00\\x00\\x01\\x84!\\xA5\\x8F\\x80\\x00\\x00\\x00E\\xD7\\xA6)\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x0C+!\\x0E\\x80\\x00\\x00\\x00oI\\xEC\\xEC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x026l\\xA0\\x80\\x00\\x00\\x00EJ\\x18\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Jg$=\\x80\\x00\\x00\\x00;\\xF2d.\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE2\\xA7\\xE7\\x80\\x00\\x00\\x00\\x8A\\x07\\x07\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01e\\x5CF^\\x80\\x00\\x00\\x00=\\x92\\x87:\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD4Rm\\x80\\x00\\x00\\x00o\\x1C8\\x09\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD9\\xF2\\xCF\\x80\\x00\\x00\\x00\\x89\\xEE\"\\xAB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02YH1\\xB0\\x80\\x00\\x00\\x00\\x8A\\xB8\\xE3\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xDE\\xF0\\x80\\x00\\x00\\x00\\x903\\xAC\\xEC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YN/5\\x80\\x00\\x00\\x00\\x8A\\x1E)\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x09\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE1\\xCBo\\x80\\x00\\x00\\x00\\x89\\xEE&M\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0J\\x0B\\x80\\x00\\x00\\x00\\x8A\\x0An\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02Y\\x1D\\xFD\\xD0\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF4\\x9B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02s\\x16\\xF1\\xF6\\x80\\x00\\x00\\x00\\x95R\\x03\\xD8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E-\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y,\\xF0\\xE1\\x80\\x00\\x00\\x00\\x8A\\x1D,\\x83\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01e\\x5C8\\xE1\\x80\\x00\\x00\\x00i\\x0E\\x9D\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EC\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xDD\\x08\\x8B\\x80\\x00\\x00\\x00\\x89\\xEA\\x05k\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02a\\xC9\\x1D\\x10\\x80\\x00\\x00\\x00\\x96\\x06\\x18\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01h\\x13T\\x04\\x80\\x00\\x00\\x00i\\x0D\\xE8\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0x\\xA4\\x80\\x00\\x00\\x00\\x89\\xDFT\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x1B\\x80\\x05\\xD8\\xD0DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC9M\\xF4b\\x80\\x00\\x00\\x00\\x98\\x9AJy\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00w\\x9C\\x80\\x00\\x0E=\\x80\\x05\\xD8\\xC9DESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xC5\\x80\\x00\\x00\\x00m\\x12\\x9DA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00w\\xD8\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xBCX8\"\\x80\\x00\\x00\\x00Wy\\x11\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B\\x1A\\xD6\\x99\\x80\\x00\\x00\\x00s]\\x01U\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1Bg`\\xFF\\x80\\x00\\x00\\x00s\\x82B>\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x14\\x8B\\xA5\\x90\\x80\\x00\\x00\\x00q\\x13\\xF5c\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x15\\x81\\xB7m\\x80\\x00\\x00\\x00qR\\x02\\xF1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E&\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x16}In\\x80\\x00\\x00\\x00q\\x9F\\xCF\\xDF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1Bq\\xC2\\xCB\\x80\\x00\\x00\\x00s\\x83s\\x1E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01$\\xD6\\x5Cm\\x80\\x00\\x00\\x00,\\xBE\\x1A\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x9D\\xE5\\x15\\xA1\\x80\\x00\\x00\\x00a\\xE9D\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xF9^%\\x80\\x00\\x00\\x00\\x02\\x93n\\xBC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x83\\xBD\\x95\\x80\\x00\\x00\\x00\\x99ki_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02%\\xF8\\xF5\\xDB\\x80\\x00\\x00\\x00w\\x16Fr\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'x\\x1D\\xA9\\x80\\x00\\x00\\x00x\\x00Fb\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&Z1\\xCB\\x80\\x00\\x00\\x00w\\x1F\\xB4\\xF2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02+\\xE3b*\\x80\\x00\\x00\\x00yL8p\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&b\\xA9H\\x80\\x00\\x00\\x00w!\\xA20\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&\\x00.\\xC9\\x80\\x00\\x00\\x00w\\x16\\x8F\\xB8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02*\\xCEzM\\x80\\x00\\x00\\x00\\x99l\\x16e\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x5C\\x0B\\xAA\\x80\\x00\\x00\\x00w \\x08\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x18\\xDD\\xFC\\x80\\x00\\x00\\x00w\\x18\\xBE\\xE9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e\\x5C-Y\\x80\\x00\\x00\\x00=\\x94\\xD7\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01zjc\\x9A\\x80\\x00\\x00\\x00i\\x0Eg$\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\x87\\x01\\x80\\x00\\x00\\x00oI\\xA1D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6~\\x06\\x80\\x00\\x00\\x00O\\xF6\\xA9\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y1\\xD2\\xE8\\x80\\x00\\x00\\x00X\\xC6]1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x803}\\x80\\x00\\x00\\x00<\\x04*\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\x88\\xCF\\x83\\x80\\x00\\x00\\x00EA\\x0D\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Jg$o\\x80\\x00\\x00\\x00;\\xF2VF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xA4\\x16\\x80\\x00\\x00\\x00h\\xDF-\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cd\\x099\\x80\\x00\\x00\\x00~J\\xAF\\x7F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9DM\\xDB\\x80\\x00\\x00\\x00i\\xCAK\\x0F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01z\\x5C\\x1C\\x1D\\x80\\x00\\x00\\x00\\x7FX\\x85\\x07\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xD4E\\xF4\\x80\\x00\\x00\\x00\\x89\\xE1\\xA5\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x85\\x05\\x94\\x80\\x00\\x00\\x00i\\xA6\\x82C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xAB\\xC0\\x11\\x80\\x00\\x00\\x00<\\x0B\\xBE\\xAE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xDE\\x03\\xDD\\x80\\x00\\x00\\x00\\x89\\xF71o\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xDB\\xB4\\x19\\x80\\x00\\x00\\x00\\x89\\xE1_{\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x9Dc\\xDA\\x80\\x00\\x00\\x00i\\xC9\\xEB\\x0A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x12\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xD9<\\x80\\x00\\x00\\x00oIQ\\x11\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YH;\\x0B\\x80\\x00\\x00\\x00\\x8A\\xB9\\x0B\\x01\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD6\\xA4;\\x80\\x00\\x00\\x00fvo\\xC3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01a$\\xD3\\xB9\\x80\\x00\\x00\\x00<&\\x98\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02X\\xB8J\\x18\\x80\\x00\\x00\\x00\\x8A\\x1B.2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02YH\\xBB\\x10\\x80\\x00\\x00\\x00\\x8A\\x1CK5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xAC\\xCF\\x80\\x00\\x00\\x00h\\xDF&V\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD6!^\\x80\\x00\\x00\\x00\\x8A\\x0E\\x08\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xD7;\\x09\\x80\\x00\\x00\\x00\\x89\\xDC\\x868\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x18\\x80\\x05\\xD8\\xCBTABLET\\x00\\x80\\x00\\x00\\x02m\\x05\\x10\\xDC\\x80\\x00\\x00\\x00\\x98\\x9AJ\\x97\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00w\\x9C\\x80\\x00\\x0E:\\x80\\x05\\xD8\\xD0MOBILE\\x00\\x80\\x00\\x00\\x01\\xC0\\x93\\xD4\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xBCcx\\x86\\x80\\x00\\x00\\x00Wz\\xADo\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15\\x83\\xA7\\x97\\x80\\x00\\x00\\x00qR\\x7Fd\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B_\\xB4S\\x80\\x00\\x00\\x00s\\x81%\\x04\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x05\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x15\\x81\\xCD\\xAD\\x80\\x00\\x00\\x00qR\\x03\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14hQ\\xF8\\x80\\x00\\x00\\x00q\\x0CN\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BYk\"\\x80\\x00\\x00\\x00s|\\xE9U\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14\\x88\\xEE\\xC5\\x80\\x00\\x00\\x00q\\x12\\xDD\\xDF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00$\\xB5u\\x0D\\x80\\x00\\x00\\x00\\x02\\x97\\xAA\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00x{\\xB2\\xDB\\x80\\x00\\x00\\x00\\x0C\\xE3\\xB8\\x92\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00xn\\xAFo\\x80\\x00\\x00\\x00\\x0C\\xE3\\x87\\xD7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&~\\x14b\\x80\\x00\\x00\\x00\\x99kw\\xD1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&+i\\xF9\\x80\\x00\\x00\\x00w\\x19\\xD6:\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA3\\xF1\\xBA\\x80\\x00\\x00\\x00w\\x96S\\x1D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&h\"z\\x80\\x00\\x00\\x00w\"\\xDF\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02,s.}\\x80\\x00\\x00\\x00yL\\xBBn\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDE6\\xB2\\x80\\x00\\x00\\x00\\x0C\\xDB\\x99\\x19\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&`\\xBF7\\x80\\x00\\x00\\x00w!}\\xC9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD5\\xB2\\xB7\\x80\\x00\\x00\\x00\\x0C\\xDB\\x8E\\x9B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC0{G\\x80\\x00\\x00\\x00\\x99l\\x11\\x9F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01p8kT\\x80\\x00\\x00\\x00sQ\\x05F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x07\\x15\\x05OTHER\\x00\\x80\\x00\\x00\\x01\\x82\\xEEN\\x97\\x80\\x00\\x00\\x00E{\\x11\\x08\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`1\\x15\\xB3\\x80\\x00\\x00\\x00;\\xF4\\x82\\xCB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xE4m\\x80\\x00\\x00\\x00~J\\x9C~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x82\\x8B9\\xA3\\x80\\x00\\x00\\x00b\\xF6R\\x9E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01o\\xBCKV\\x80\\x00\\x00\\x00fo8$\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`y\\xE2q\\x80\\x00\\x00\\x00<\\x02\\xDA\\xAC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+h\\xB9\\xA7\\x80\\x00\\x00\\x00y\"N\\x12\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x02 \\x0E\\xEB\\x80\\x00\\x00\\x00kF\\x12\\xCA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBC\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC9qi\\x80\\x00\\x00\\x00E`\\xAE_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01zd\\xF0\\x1C\\x80\\x00\\x00\\x00B\\xED\\xFBn\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YKjG\\x80\\x00\\x00\\x00\\x8A\\xA07w\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xC5{\\xD4\\x80\\x00\\x00\\x00\\x8A\\xFAMq\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xDF\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02`wo\\xC8\\x80\\x00\\x00\\x00\\x8B\\xB7\\xA6\\xCF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x85\\x13\\xC1\\x80\\x00\\x00\\x00i\\xA97\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF3\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x9F\\x1F<\\x80\\x00\\x00\\x00i\\xC9u\\x17\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFD\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01zd]\\x0B\\x80\\x00\\x00\\x00i\\xA97\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x07\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xE2\\x97\\x08\\x80\\x00\\x00\\x00\\x89\\xDAU\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01/\"6\\xC4\\x80\\x00\\x00\\x00+\\xB0(\\xE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xF3\\x100+\\x80\\x00\\x00\\x00\\x02\\x92\\xF5[\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00xs\\xFFB\\x80\\x00\\x00\\x00\\x0C\\xE3\\x97\\xE8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"),
+        Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&i\\x0C\\x00\\x8

<TRUNCATED>

[39/50] [abbrv] phoenix git commit: Surface partial saves in CommitException (PHOENIX-900) from https://github.com/apache/phoenix/pull/37

Posted by ma...@apache.org.
Surface partial saves in CommitException (PHOENIX-900) from https://github.com/apache/phoenix/pull/37
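
For context, a rough sketch of what surfacing partial saves lets client code do.
This is illustrative only and not part of the commit; it assumes CommitException
extends SQLException and exposes getUncommittedStatementIndexes(), which is what
the int[] expectations passed around in PartialCommitIT below suggest:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    import org.apache.phoenix.execute.CommitException;

    public class PartialCommitExample {
        public static void main(String[] args) throws SQLException {
            try (Connection con = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                con.setAutoCommit(false);
                Statement sta = con.createStatement();
                sta.execute("upsert into a_success_table values ('k1', 'a')");
                sta.execute("upsert into b_failure_table values ('k2', 'b')");
                try {
                    con.commit();
                } catch (CommitException e) {
                    // Statements are indexed in execution order; an index reported
                    // here means that statement's mutations were not durably
                    // committed and are candidates for retry.
                    // (Accessor name assumed from the test's int[] expectations.)
                    for (int i : e.getUncommittedStatementIndexes()) {
                        System.err.println("statement " + i + " was not committed");
                    }
                }
            }
        }
    }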


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fa58c782
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fa58c782
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fa58c782

Branch: refs/heads/calcite
Commit: fa58c7821a2e8fce30a8c0ff6e42aa00134dbce0
Parents: dab9d51
Author: Eli Levine <el...@apache.org>
Authored: Thu Feb 26 20:50:02 2015 -0800
Committer: Eli Levine <el...@apache.org>
Committed: Thu Feb 26 20:50:02 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/execute/PartialCommitIT.java | 302 +++++++++++++++++++
 .../apache/phoenix/compile/DeleteCompiler.java  |  13 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |  13 +-
 .../apache/phoenix/execute/CommitException.java |  35 ++-
 .../apache/phoenix/execute/MutationState.java   | 156 ++++++----
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  37 ++-
 .../phoenix/jdbc/PhoenixPreparedStatement.java  |   7 +-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |   3 +
 .../phoenix/execute/MutationStateTest.java      |  64 ++++
 .../java/org/apache/phoenix/query/BaseTest.java |   2 +-
 10 files changed, 543 insertions(+), 89 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
new file mode 100644
index 0000000..550d7de
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2014 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you maynot use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.execute;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Sets.newHashSet;
+import static java.util.Collections.singletonList;
+import static org.apache.phoenix.query.BaseTest.initAndRegisterDriver;
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
+import static org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
+import static org.apache.phoenix.util.TestUtil.LOCALHOST;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.Driver;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.hbase.index.Indexer;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class PartialCommitIT {
+    
+    private static final String TABLE_NAME_TO_FAIL = "b_failure_table".toUpperCase();
+    private static final byte[] ROW_TO_FAIL = Bytes.toBytes("fail me");
+    private static final String UPSERT_TO_FAIL = "upsert into " + TABLE_NAME_TO_FAIL + " values ('" + Bytes.toString(ROW_TO_FAIL) + "', 'boom!')";
+    private static final String UPSERT_SELECT_TO_FAIL = "upsert into " + TABLE_NAME_TO_FAIL + " select k, c from a_success_table";
+    private static final String DELETE_TO_FAIL = "delete from " + TABLE_NAME_TO_FAIL + " where k='z'";
+    private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+    private static String url;
+    private static Driver driver;
+    private static final Properties props = new Properties();
+    
+    static {
+        props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 10);
+    }
+    
+    @BeforeClass
+    public static void setupCluster() throws Exception {
+      Configuration conf = TEST_UTIL.getConfiguration();
+      setUpConfigForMiniCluster(conf);
+      conf.setClass("hbase.coprocessor.region.classes", FailingRegionObserver.class, RegionObserver.class);
+      conf.setBoolean("hbase.coprocessor.abortonerror", false);
+      conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
+      TEST_UTIL.startMiniCluster();
+      String clientPort = TEST_UTIL.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
+      url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
+              + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
+
+      Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
+      // Must update config before starting server
+      props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
+      driver = initAndRegisterDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
+      createTablesWithABitOfData();
+    }
+    
+    private static void createTablesWithABitOfData() throws Exception {
+        Properties props = new Properties();
+        props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 10);
+
+        try (Connection con = driver.connect(url, new Properties())) {
+            Statement sta = con.createStatement();
+            sta.execute("create table a_success_table (k varchar primary key, c varchar)");
+            sta.execute("create table b_failure_table (k varchar primary key, c varchar)");
+            sta.execute("create table c_success_table (k varchar primary key, c varchar)");
+            con.commit();
+        }
+
+        props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 100);
+
+        try (Connection con = driver.connect(url, new Properties())) {
+            con.setAutoCommit(false);
+            Statement sta = con.createStatement();
+            for (String table : newHashSet("a_success_table", TABLE_NAME_TO_FAIL, "c_success_table")) {
+                sta.execute("upsert into " + table + " values ('z', 'z')");
+                sta.execute("upsert into " + table + " values ('zz', 'zz')");
+                sta.execute("upsert into " + table + " values ('zzz', 'zzz')");
+            }
+            con.commit();
+        }
+    }
+    
+    @AfterClass
+    public static void teardownCluster() throws Exception {
+      TEST_UTIL.shutdownMiniCluster();
+    }
+    
+    @Test
+    public void testNoFailure() {
+        testPartialCommit(singletonList("upsert into a_success_table values ('testNoFailure', 'a')"), 0, new int[0], false,
+                                        singletonList("select count(*) from a_success_table where k='testNoFailure'"), singletonList(new Integer(1)));
+    }
+    
+    @Test
+    public void testUpsertFailure() {
+        testPartialCommit(newArrayList("upsert into a_success_table values ('testUpsertFailure1', 'a')", 
+                                       UPSERT_TO_FAIL, 
+                                       "upsert into a_success_table values ('testUpsertFailure2', 'b')"), 
+                                       1, new int[]{1}, true,
+                                       newArrayList("select count(*) from a_success_table where k like 'testUpsertFailure_'",
+                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = '" + Bytes.toString(ROW_TO_FAIL) + "'"), 
+                                       newArrayList(new Integer(2), new Integer(0)));
+    }
+    
+    @Test
+    public void testUpsertSelectFailure() throws SQLException {
+        props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, Integer.toString(100));
+
+        try (Connection con = driver.connect(url, props)) {
+            con.createStatement().execute("upsert into a_success_table values ('" + Bytes.toString(ROW_TO_FAIL) + "', 'boom!')");
+            con.commit();
+        }
+        
+        testPartialCommit(newArrayList("upsert into a_success_table values ('testUpsertSelectFailure', 'a')", 
+                                       UPSERT_SELECT_TO_FAIL), 
+                                       1, new int[]{1}, true,
+                                       newArrayList("select count(*) from a_success_table where k in ('testUpsertSelectFailure', '" + Bytes.toString(ROW_TO_FAIL) + "')",
+                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = '" + Bytes.toString(ROW_TO_FAIL) + "'"), 
+                                       newArrayList(new Integer(2), new Integer(0)));
+    }
+    
+    @Test
+    public void testDeleteFailure() {
+        testPartialCommit(newArrayList("upsert into a_success_table values ('testDeleteFailure1', 'a')", 
+                                       DELETE_TO_FAIL,
+                                       "upsert into a_success_table values ('testDeleteFailure2', 'b')"), 
+                                       1, new int[]{1}, true,
+                                       newArrayList("select count(*) from a_success_table where k like 'testDeleteFailure_'",
+                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = 'z'"), 
+                                       newArrayList(new Integer(2), new Integer(1)));
+    }
+    
+    /**
+     * Relies on the {@link MutationState} mutations map being ordered lexicographically by table name (via {@link TableRefComparator}), making the commit order predictable.
+     */
+    @Test
+    public void testOrderOfMutationsIsPredictable() {
+        testPartialCommit(newArrayList("upsert into c_success_table values ('testOrderOfMutationsIsPredictable', 'c')", // will fail because c_success_table is after b_failure_table by table sort order
+                                       UPSERT_TO_FAIL, 
+                                       "upsert into a_success_table values ('testOrderOfMutationsIsPredictable', 'a')"), // will succeed because a_success_table is before b_failure_table by table sort order
+                                       2, new int[]{0,1}, true,
+                                       newArrayList("select count(*) from c_success_table where k='testOrderOfMutationsIsPredictable'",
+                                                    "select count(*) from a_success_table where k='testOrderOfMutationsIsPredictable'",
+                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = '" + Bytes.toString(ROW_TO_FAIL) + "'"), 
+                                       newArrayList(new Integer(0), new Integer(1), new Integer(0)));
+    }
+    
+    @Test
+    public void checkThatAllStatementTypesMaintainOrderInConnection() {
+        testPartialCommit(newArrayList("upsert into a_success_table values ('k', 'checkThatAllStatementTypesMaintainOrderInConnection')", 
+                                       "upsert into a_success_table select k, c from c_success_table",
+                                       DELETE_TO_FAIL,
+                                       "select * from a_success_table", 
+                                       UPSERT_TO_FAIL), 
+                                       2, new int[]{2,4}, true,
+                                       newArrayList("select count(*) from a_success_table where k='testOrderOfMutationsIsPredictable' or k like 'z%'", // rows left: zz, zzz, checkThatAllStatementTypesMaintainOrderInConnection
+                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = '" + Bytes.toString(ROW_TO_FAIL) + "'",
+                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = 'z'"), 
+                                       newArrayList(new Integer(4), new Integer(0), new Integer(1)));
+    }
+    
+    private void testPartialCommit(List<String> statements, int failureCount, int[] expectedUncommittedStatementIndexes, boolean willFail,
+                                   List<String> countStatementsForVerification, List<Integer> expectedCountsForVerification) {
+        Preconditions.checkArgument(countStatementsForVerification.size() == expectedCountsForVerification.size());
+        
+        try (Connection con = getConnectionWithTableOrderPreservingMutationState()) {
+            con.setAutoCommit(false);
+            Statement sta = con.createStatement();
+            for (String statement : statements) {
+                sta.execute(statement);
+            }
+            try {
+                con.commit();
+                if (willFail) {
+                    fail("Expected at least one statement in the list to fail");
+                } else {
+                    assertEquals(0, con.unwrap(PhoenixConnection.class).getStatementExecutionCounter()); // should have been reset to 0 in commit()
+                }
+            } catch (SQLException sqle) {
+                if (!willFail) {
+                    fail("Expected no statements to fail");
+                }
+                assertEquals(CommitException.class, sqle.getClass());
+                int[] uncommittedStatementIndexes = ((CommitException)sqle).getUncommittedStatementIndexes();
+                assertEquals(failureCount, uncommittedStatementIndexes.length);
+                assertArrayEquals(expectedUncommittedStatementIndexes, uncommittedStatementIndexes);
+            }
+            
+            // verify data in HBase
+            for (int i = 0; i < countStatementsForVerification.size(); i++) {
+                String countStatement = countStatementsForVerification.get(i);
+                ResultSet rs = sta.executeQuery(countStatement);
+                if (!rs.next()) {
+                    fail("Expected a single row from count query");
+                }
+                assertEquals(expectedCountsForVerification.get(i).intValue(), rs.getInt(1));
+            }
+        } catch (SQLException e) {
+            fail(e.toString());
+        }
+    }
+    
+    private PhoenixConnection getConnectionWithTableOrderPreservingMutationState() throws SQLException {
+        Connection con = driver.connect(url, new Properties());
+        PhoenixConnection phxCon = new PhoenixConnection(con.unwrap(PhoenixConnection.class));
+        final Map<TableRef,Map<ImmutableBytesPtr,MutationState.RowMutationState>> mutations = Maps.newTreeMap(new TableRefComparator());
+        return new PhoenixConnection(phxCon) {
+            protected MutationState newMutationState(int maxSize) {
+                return new MutationState(maxSize, this, mutations);
+            };
+        };
+    }
+    
+    public static class FailingRegionObserver extends SimpleRegionObserver {
+        @Override
+        public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
+                final Durability durability) throws HBaseIOException {
+            if (shouldFailUpsert(c, put) || shouldFailDelete(c, put)) {
+                // Throwing anything other than an instance of IOException results
+                // in this coprocessor being unloaded.
+                // DoNotRetryIOException tells HBase not to retry this mutation
+                // multiple times.
+                throw new DoNotRetryIOException();
+            }
+        }
+        
+        private static boolean shouldFailUpsert(ObserverContext<RegionCoprocessorEnvironment> c, Put put) {
+            String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
+            return TABLE_NAME_TO_FAIL.equals(tableName) && Bytes.equals(ROW_TO_FAIL, put.getRow());
+        }
+        
+        private static boolean shouldFailDelete(ObserverContext<RegionCoprocessorEnvironment> c, Put put) {
+            String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
+            return TABLE_NAME_TO_FAIL.equals(tableName) &&  
+                   // Phoenix deletes are sent as Puts with empty values
+                   put.getFamilyCellMap().firstEntry().getValue().get(0).getValueLength() == 0; 
+        }
+    }
+    
+    /**
+     * Orders the {@link MutationState#mutations} map by physical table name.
+     */
+    private static class TableRefComparator implements Comparator<TableRef> {
+        @Override
+        public int compare(TableRef tr1, TableRef tr2) {
+            return tr1.getTable().getPhysicalName().getString().compareTo(tr2.getTable().getPhysicalName().getString());
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 322d24a..6f51a4c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -39,6 +39,7 @@ import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.BaseQueryPlan;
 import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.execute.MutationState.RowMutationState;
 import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.IndexMetaDataCacheClient;
@@ -106,8 +107,8 @@ public class DeleteCompiler {
         ConnectionQueryServices services = connection.getQueryServices();
         final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
         final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
-        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutations = Maps.newHashMapWithExpectedSize(batchSize);
-        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> indexMutations = null;
+        Map<ImmutableBytesPtr,RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
+        Map<ImmutableBytesPtr,RowMutationState> indexMutations = null;
         // If indexTableRef is set, we're deleting the rows from both the index table and
         // the data table through a single query to save executing an additional one.
         if (indexTableRef != null) {
@@ -147,11 +148,11 @@ public class DeleteCompiler {
                     }
                     table.newKey(ptr, values);
                 }
-                mutations.put(ptr, PRow.DELETE_MARKER);
+                mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter()));
                 if (indexTableRef != null) {
                     ImmutableBytesPtr indexPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
                     rs.getCurrentRow().getKey(indexPtr);
-                    indexMutations.put(indexPtr, PRow.DELETE_MARKER);
+                    indexMutations.put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter()));
                 }
                 if (mutations.size() > maxSize) {
                     throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
@@ -429,9 +430,9 @@ public class DeleteCompiler {
                         // keys for our ranges
                         ScanRanges ranges = context.getScanRanges();
                         Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator(); 
-                        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
+                        Map<ImmutableBytesPtr,RowMutationState> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
                         while (iterator.hasNext()) {
-                            mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), PRow.DELETE_MARKER);
+                            mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter()));
                         }
                         return new MutationState(tableRef, mutation, 0, maxSize, connection);
                     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index b21cc2f..f172814 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -42,6 +42,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.execute.MutationState.RowMutationState;
 import org.apache.phoenix.expression.Determinism;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
@@ -95,7 +96,7 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 public class UpsertCompiler {
-    private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes, PTable table, Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation) {
+    private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes, PTable table, Map<ImmutableBytesPtr,RowMutationState> mutation, PhoenixStatement statement) {
         Map<PColumn,byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
         byte[][] pkValues = new byte[table.getPKColumns().size()][];
         // If the table uses salting, the first byte is the salting byte, set to an empty array
@@ -114,7 +115,7 @@ public class UpsertCompiler {
         }
         ImmutableBytesPtr ptr = new ImmutableBytesPtr();
         table.newKey(ptr, pkValues);
-        mutation.put(ptr, columnValues);
+        mutation.put(ptr, new RowMutationState(columnValues, statement.getConnection().getStatementExecutionCounter()));
     }
 
     private static MutationState upsertSelect(PhoenixStatement statement, 
@@ -128,7 +129,7 @@ public class UpsertCompiler {
             boolean isAutoCommit = connection.getAutoCommit();
             byte[][] values = new byte[columnIndexes.length][];
             int rowCount = 0;
-            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(batchSize);
+            Map<ImmutableBytesPtr,RowMutationState> mutation = Maps.newHashMapWithExpectedSize(batchSize);
             PTable table = tableRef.getTable();
             ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
             ImmutableBytesWritable ptr = new ImmutableBytesWritable();
@@ -156,7 +157,7 @@ public class UpsertCompiler {
                             column.getMaxLength(), column.getScale(), column.getSortOrder());
                     values[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                 }
-                setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
+                setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement);
                 rowCount++;
                 // Commit a batch if auto commit is true and we're at our batch size
                 if (isAutoCommit && rowCount % batchSize == 0) {
@@ -802,8 +803,8 @@ public class UpsertCompiler {
                         throw new IllegalStateException();
                     }
                 }
-                Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
-                setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
+                Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(1);
+                setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation, statement);
                 return new MutationState(tableRef, mutation, 0, maxSize, connection);
             }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java
index 63bf6a1..a9d8311 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java
@@ -19,23 +19,32 @@ package org.apache.phoenix.execute;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.jdbc.PhoenixConnection;
+
 public class CommitException extends SQLException {
-    private static final long serialVersionUID = 1L;
-    private final MutationState uncommittedState;
-    private final MutationState committedState;
+    private static final long serialVersionUID = 2L;
+    private final int[] uncommittedStatementIndexes;
 
-    public CommitException(Exception e, MutationState uncommittedState, MutationState committedState) {
+    public CommitException(Exception e, int[] uncommittedStatementIndexes) {
         super(e);
-        this.uncommittedState = uncommittedState;
-        this.committedState = committedState;
-    }
-
-    public MutationState getUncommittedState() {
-        return uncommittedState;
+        this.uncommittedStatementIndexes = uncommittedStatementIndexes;
     }
 
-    public MutationState getCommittedState() {
-        return committedState;
+    /**
+     * Returns the indexes of the UPSERT and DELETE statements that have failed. The returned indexes
+     * correspond to each failed statement's order of execution within a {@link PhoenixConnection}, up to
+     * the commit/rollback.
+     * <p>
+     * Each statement whose index is returned corresponds to one or more HBase mutations that have failed.
+     * <p>
+     * Statement indexes are maintained correctly only for connections that mutate and query
+     * <b>data</b> (DELETE, UPSERT and SELECT). Statement order (and thus failure order) is
+     * undefined for connections that execute metadata operations, because Phoenix rolls
+     * back connections after metadata mutations.
+     * 
+     * @see PhoenixConnection#getStatementExecutionCounter()
+     */
+    public int[] getUncommittedStatementIndexes() {
+        return uncommittedStatementIndexes;
     }
-
 }

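Not part of the commit: a minimal client-side sketch of how the reworked exception
might be consumed. The jdbcUrl parameter, the table T, and the example statements are
hypothetical; only CommitException.getUncommittedStatementIndexes() comes from the
change above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.List;

import org.apache.phoenix.execute.CommitException;

public class PartialCommitExample {
    // Executes a batch of mutations and reports the SQL of any statement whose
    // mutations did not make it to HBase, using the indexes carried by CommitException.
    static void commitAndReportFailures(String jdbcUrl) throws SQLException {
        List<String> statements = Arrays.asList(
                "upsert into T values ('a', '1')",  // statement index 0
                "upsert into T values ('b', '2')",  // statement index 1
                "delete from T where k = 'c'");     // statement index 2
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            conn.setAutoCommit(false);
            Statement stmt = conn.createStatement();
            for (String sql : statements) {
                stmt.execute(sql);
            }
            conn.commit();
        } catch (CommitException e) {
            // The indexes refer to the execution order of the statements above.
            for (int i : e.getUncommittedStatementIndexes()) {
                System.err.println("not committed: " + statements.get(i));
            }
        }
    }
}
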
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 04626a6..8053f15 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.execute;
 
 import java.io.IOException;
 import java.sql.SQLException;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -61,9 +62,11 @@ import org.cloudera.htrace.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.sun.istack.NotNull;
 
 /**
  * 
@@ -78,40 +81,32 @@ public class MutationState implements SQLCloseable {
     private PhoenixConnection connection;
     private final long maxSize;
     private final ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
-    private final Map<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> mutations = Maps.newHashMapWithExpectedSize(3); // TODO: Sizing?
+    private final Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations;
     private long sizeOffset;
     private int numRows = 0;
 
-    public MutationState(int maxSize, PhoenixConnection connection) {
+    MutationState(long maxSize, PhoenixConnection connection, Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations) {
+        this.maxSize = maxSize;
+        this.connection = connection;
+        this.mutations = mutations;
+    }
+
+    public MutationState(long maxSize, PhoenixConnection connection) {
         this(maxSize,connection,0);
     }
     
-    public MutationState(int maxSize, PhoenixConnection connection, long sizeOffset) {
-        this.maxSize = maxSize;
-        this.connection = connection;
+    public MutationState(long maxSize, PhoenixConnection connection, long sizeOffset) {
+        this(maxSize, connection, Maps.<TableRef, Map<ImmutableBytesPtr,RowMutationState>>newHashMapWithExpectedSize(connection.getMutateBatchSize()));
         this.sizeOffset = sizeOffset;
     }
     
-    public MutationState(TableRef table, Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutations, long sizeOffset, long maxSize, PhoenixConnection connection) {
-        this.maxSize = maxSize;
-        this.connection = connection;
+    public MutationState(TableRef table, Map<ImmutableBytesPtr,RowMutationState> mutations, long sizeOffset, long maxSize, PhoenixConnection connection) {
+        this(maxSize, connection, sizeOffset);
         this.mutations.put(table, mutations);
-        this.sizeOffset = sizeOffset;
         this.numRows = mutations.size();
         throwIfTooBig();
     }
     
-    private MutationState(List<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> entries, long sizeOffset, long maxSize, PhoenixConnection connection) {
-        this.maxSize = maxSize;
-        this.connection = connection;
-        this.sizeOffset = sizeOffset;
-        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry : entries) {
-            numRows += entry.getValue().size();
-            this.mutations.put(entry.getKey(), entry.getValue());
-        }
-        throwIfTooBig();
-    }
-    
     private void throwIfTooBig() {
         if (numRows > maxSize) {
             // TODO: throw SQLException ?
@@ -134,29 +129,28 @@ public class MutationState implements SQLCloseable {
         }
         this.sizeOffset += newMutation.sizeOffset;
         // Merge newMutation with this one, keeping state from newMutation for any overlaps
-        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry : newMutation.mutations.entrySet()) {
+        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry : newMutation.mutations.entrySet()) {
             // Replace existing entries for the table with new entries
             TableRef tableRef = entry.getKey();
             PTable table = tableRef.getTable();
             boolean isIndex = table.getType() == PTableType.INDEX;
-            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> existingRows = this.mutations.put(tableRef, entry.getValue());
+            Map<ImmutableBytesPtr,RowMutationState> existingRows = this.mutations.put(tableRef, entry.getValue());
             if (existingRows != null) { // Rows for that table already exist
                 // Loop through new rows and replace existing with new
-                for (Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>> rowEntry : entry.getValue().entrySet()) {
+                for (Map.Entry<ImmutableBytesPtr,RowMutationState> rowEntry : entry.getValue().entrySet()) {
                     // Replace existing row with new row
-                    Map<PColumn,byte[]> existingValues = existingRows.put(rowEntry.getKey(), rowEntry.getValue());
-                    if (existingValues != null) {
+                    RowMutationState existingRowMutationState = existingRows.put(rowEntry.getKey(), rowEntry.getValue());
+                    if (existingRowMutationState != null) {
+                        Map<PColumn,byte[]> existingValues = existingRowMutationState.getColumnValues();
                         if (existingValues != PRow.DELETE_MARKER) {
-                            Map<PColumn,byte[]> newRow = rowEntry.getValue();
+                            Map<PColumn,byte[]> newRow = rowEntry.getValue().getColumnValues();
                             // if new row is PRow.DELETE_MARKER, it means delete, and we don't need to merge it with existing row. 
                             if (newRow != PRow.DELETE_MARKER) {
-                                // Replace existing column values with new column values
-                                for (Map.Entry<PColumn,byte[]> valueEntry : newRow.entrySet()) {
-                                    existingValues.put(valueEntry.getKey(), valueEntry.getValue());
-                                }
+                                // Merge existing column values with new column values
+                                existingRowMutationState.join(rowEntry.getValue());
                                 // Now that the existing row has been merged with the new row, replace it back
-                                // again (since it was replaced with the new one above).
-                                existingRows.put(rowEntry.getKey(), existingValues);
+                                // again (since it was merged with the new one above).
+                                existingRows.put(rowEntry.getKey(), existingRowMutationState);
                             }
                         }
                     } else {
@@ -176,16 +170,16 @@ public class MutationState implements SQLCloseable {
         throwIfTooBig();
     }
     
-    private Iterator<Pair<byte[],List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, Map<PColumn, byte[]>> values, long timestamp, boolean includeMutableIndexes) {
+    private Iterator<Pair<byte[],List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, RowMutationState> values, long timestamp, boolean includeMutableIndexes) {
         final Iterator<PTable> indexes = // Only maintain tables with immutable rows through this client-side mechanism
                 (tableRef.getTable().isImmutableRows() || includeMutableIndexes) ? 
                         IndexMaintainer.nonDisabledIndexIterator(tableRef.getTable().getIndexes().iterator()) : 
                         Iterators.<PTable>emptyIterator();
         final List<Mutation> mutations = Lists.newArrayListWithExpectedSize(values.size());
         final List<Mutation> mutationsPertainingToIndex = indexes.hasNext() ? Lists.<Mutation>newArrayListWithExpectedSize(values.size()) : null;
-        Iterator<Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>>> iterator = values.entrySet().iterator();
+        Iterator<Map.Entry<ImmutableBytesPtr,RowMutationState>> iterator = values.entrySet().iterator();
         while (iterator.hasNext()) {
-            Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>> rowEntry = iterator.next();
+            Map.Entry<ImmutableBytesPtr,RowMutationState> rowEntry = iterator.next();
             ImmutableBytesPtr key = rowEntry.getKey();
             PRow row = tableRef.getTable().newRow(connection.getKeyValueBuilder(), timestamp, key);
             List<Mutation> rowMutations, rowMutationsPertainingToIndex;
@@ -197,7 +191,7 @@ public class MutationState implements SQLCloseable {
                 // delete rows).
                 rowMutationsPertainingToIndex = Collections.emptyList();
             } else {
-                for (Map.Entry<PColumn,byte[]> valueEntry : rowEntry.getValue().entrySet()) {
+                for (Map.Entry<PColumn,byte[]> valueEntry : rowEntry.getValue().getColumnValues().entrySet()) {
                     row.setValue(valueEntry.getKey(), valueEntry.getValue());
                 }
                 rowMutations = row.toRowMutations();
@@ -249,14 +243,14 @@ public class MutationState implements SQLCloseable {
     }
     
     public Iterator<Pair<byte[],List<Mutation>>> toMutations(final boolean includeMutableIndexes) {
-        final Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> iterator = this.mutations.entrySet().iterator();
+        final Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>>> iterator = this.mutations.entrySet().iterator();
         if (!iterator.hasNext()) {
             return Iterators.emptyIterator();
         }
         Long scn = connection.getSCN();
         final long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
         return new Iterator<Pair<byte[],List<Mutation>>>() {
-            private Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> current = iterator.next();
+            private Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> current = iterator.next();
             private Iterator<Pair<byte[],List<Mutation>>> innerIterator = init();
                     
             private Iterator<Pair<byte[],List<Mutation>>> init() {
@@ -297,7 +291,7 @@ public class MutationState implements SQLCloseable {
         Long scn = connection.getSCN();
         MetaDataClient client = new MetaDataClient(connection);
         long[] timeStamps = new long[this.mutations.size()];
-        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry : mutations.entrySet()) {
+        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry : mutations.entrySet()) {
             TableRef tableRef = entry.getKey();
             long serverTimeStamp = tableRef.getTimeStamp();
             PTable table = tableRef.getTable();
@@ -312,12 +306,15 @@ public class MutationState implements SQLCloseable {
                         // TODO: use bitset?
                         table = result.getTable();
                         PColumn[] columns = new PColumn[table.getColumns().size()];
-                        for (Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>> rowEntry : entry.getValue().entrySet()) {
-                            Map<PColumn,byte[]> valueEntry = rowEntry.getValue();
-                            if (valueEntry != PRow.DELETE_MARKER) {
-                                for (PColumn column : valueEntry.keySet()) {
-                                    columns[column.getPosition()] = column;
-                                }
+                        for (Map.Entry<ImmutableBytesPtr,RowMutationState> rowEntry : entry.getValue().entrySet()) {
+                            RowMutationState valueEntry = rowEntry.getValue();
+                            if (valueEntry != null) {
+                                Map<PColumn, byte[]> colValues = valueEntry.getColumnValues();
+                                if (colValues != PRow.DELETE_MARKER) {
+                                    for (PColumn column : colValues.keySet()) {
+                                        columns[column.getPosition()] = column;
+                                    }
+                                }
                             }
                         }
                         for (PColumn column : columns) {
@@ -357,15 +354,14 @@ public class MutationState implements SQLCloseable {
         int i = 0;
         byte[] tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
         long[] serverTimeStamps = validate();
-        Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> iterator = this.mutations.entrySet().iterator();
-        List<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> committedList = Lists.newArrayListWithCapacity(this.mutations.size());
+        Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>>> iterator = this.mutations.entrySet().iterator();
 
         // add tracing for this operation
         TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables");
         Span span = trace.getSpan();
         while (iterator.hasNext()) {
-            Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry = iterator.next();
-            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> valuesMap = entry.getValue();
+            Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry = iterator.next();
+            Map<ImmutableBytesPtr,RowMutationState> valuesMap = entry.getValue();
             TableRef tableRef = entry.getKey();
             PTable table = tableRef.getTable();
             table.getIndexMaintainers(tempPtr, connection);
@@ -425,7 +421,6 @@ public class MutationState implements SQLCloseable {
                         child.stop();
                         shouldRetry = false;
                         if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Total time for batch call of  " + mutations.size() + " mutations into " + table.getName().getString() + ": " + (System.currentTimeMillis() - startTime) + " ms", connection));
-                        committedList.add(entry);
                     } catch (Exception e) {
                         SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
                         if (inferredE != null) {
@@ -446,9 +441,7 @@ public class MutationState implements SQLCloseable {
                             }
                             e = inferredE;
                         }
-                        // Throw to client with both what was committed so far and what is left to be committed.
-                        // That way, client can either undo what was done or try again with what was not done.
-                        sqlE = new CommitException(e, this, new MutationState(committedList, this.sizeOffset, this.maxSize, this.connection));
+                        sqlE = new CommitException(e, getUncommittedStatementIndexes());
                     } finally {
                         try {
                             hTable.close();
@@ -488,7 +481,64 @@ public class MutationState implements SQLCloseable {
         numRows = 0;
     }
     
+    private int[] getUncommittedStatementIndexes() {
+        int[] result = new int[0];
+        for (Map<ImmutableBytesPtr, RowMutationState> rowMutations : mutations.values()) {
+            for (RowMutationState rowMutationState : rowMutations.values()) {
+                result = joinSortedIntArrays(result, rowMutationState.getStatementIndexes());
+            }
+        }
+        return result;
+    }
+    
     @Override
     public void close() throws SQLException {
     }
+    
+    public static int[] joinSortedIntArrays(int[] a, int[] b) {
+        int[] result = new int[a.length + b.length];
+        int i = 0, j = 0, k = 0, current;
+        while (i < a.length && j < b.length) {
+            current = a[i] < b[j] ? a[i++] : b[j++];
+            for ( ; i < a.length && a[i] == current; i++);
+            for ( ; j < b.length && b[j] == current; j++);
+            result[k++] = current;
+        }
+        while (i < a.length) {
+            for (current = a[i++] ; i < a.length && a[i] == current; i++);
+            result[k++] = current;
+        }
+        while (j < b.length) {
+            for (current = b[j++] ; j < b.length && b[j] == current; j++);
+            result[k++] = current;
+        }
+        return Arrays.copyOf(result, k);
+    }
+    
+    public static class RowMutationState {
+        private Map<PColumn,byte[]> columnValues;
+        private int[] statementIndexes;
+
+        public RowMutationState(@NotNull Map<PColumn,byte[]> columnValues, int statementIndex) {
+            Preconditions.checkNotNull(columnValues);
+
+            this.columnValues = columnValues;
+            this.statementIndexes = new int[] {statementIndex};
+        }
+
+        Map<PColumn, byte[]> getColumnValues() {
+            return columnValues;
+        }
+
+        int[] getStatementIndexes() {
+            return statementIndexes;
+        }
+        
+        void join(RowMutationState newRow) {
+            getColumnValues().putAll(newRow.getColumnValues());
+            statementIndexes = joinSortedIntArrays(statementIndexes, newRow.getStatementIndexes());
+        }
+        
+
+    }
 }

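To make the statement-index bookkeeping concrete: when the same row is mutated by more
than one statement, RowMutationState.join() merges the per-row indexes through the
public joinSortedIntArrays() helper above. A small sketch (not part of the commit):

import java.util.Arrays;

import org.apache.phoenix.execute.MutationState;

public class JoinIndexesExample {
    public static void main(String[] args) {
        int[] first = {0, 2};   // row touched by statements 0 and 2
        int[] second = {2, 5};  // same row touched again by statements 2 and 5
        int[] merged = MutationState.joinSortedIntArrays(first, second);
        // The merge keeps the result sorted and drops duplicates.
        System.out.println(Arrays.toString(merged)); // prints [0, 2, 5]
    }
}
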
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index de9e323..c9ac94a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.execute.CommitException;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.expression.function.FunctionArgumentType;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
@@ -121,21 +122,21 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     private final Properties info;
     private List<SQLCloseable> statements = new ArrayList<SQLCloseable>();
     private final Map<PDataType<?>, Format> formatters = new HashMap<>();
-    private final MutationState mutationState;
+    private MutationState mutationState;
     private final int mutateBatchSize;
     private final Long scn;
     private boolean isAutoCommit = false;
     private PMetaData metaData;
     private final PName tenantId;
-    private final String datePattern;
+    private final String datePattern; 
     private final String timePattern;
     private final String timestampPattern;
-    
+    private int statementExecutionCounter;
     private boolean isClosed = false;
     private Sampler<?> sampler;
     private boolean readOnly = false;
-    private Map<String, String> customTracingAnnotations = emptyMap(); 
- 
+    private Map<String, String> customTracingAnnotations = emptyMap();
+    
     static {
         Tracing.addTraceMetricsSource();
     }
@@ -150,17 +151,20 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         this(connection.getQueryServices(), connection.getURL(), connection.getClientInfo(), connection.getMetaDataCache());
         this.isAutoCommit = connection.isAutoCommit;
         this.sampler = connection.sampler;
+        this.statementExecutionCounter = connection.statementExecutionCounter;
     }
     
     public PhoenixConnection(PhoenixConnection connection, long scn) throws SQLException {
         this(connection.getQueryServices(), connection, scn);
         this.sampler = connection.sampler;
+        this.statementExecutionCounter = connection.statementExecutionCounter;
     }
     
     public PhoenixConnection(ConnectionQueryServices services, PhoenixConnection connection, long scn) throws SQLException {
         this(services, connection.getURL(), newPropsWithSCN(scn,connection.getClientInfo()), connection.getMetaDataCache());
         this.isAutoCommit = connection.isAutoCommit;
         this.sampler = connection.sampler;
+        this.statementExecutionCounter = connection.statementExecutionCounter;
     }
     
     public PhoenixConnection(ConnectionQueryServices services, String url, Properties info, PMetaData metaData) throws SQLException {
@@ -233,7 +237,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
             }
             
         });
-        this.mutationState = new MutationState(maxSize, this);
+        this.mutationState = newMutationState(maxSize);
         this.services.addConnection(this);
 
         // setup tracing, if its enabled
@@ -361,6 +365,10 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         return metaData;
     }
 
+    protected MutationState newMutationState(int maxSize) {
+        return new MutationState(maxSize, this); 
+    }
+    
     public MutationState getMutationState() {
         return mutationState;
     }
@@ -426,6 +434,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
                 return null;
             }
         }, Tracing.withTracing(this, "committing mutations"));
+        statementExecutionCounter = 0;
     }
 
     @Override
@@ -626,6 +635,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     @Override
     public void rollback() throws SQLException {
         mutationState.rollback(this);
+        statementExecutionCounter = 0;
     }
 
     @Override
@@ -776,4 +786,19 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     public KeyValueBuilder getKeyValueBuilder() {
         return this.services.getKeyValueBuilder();
     }
+    
+    /**
+     * Returns the 0-based counter that tracks executions of {@link Statement}s and {@link PreparedStatement}s
+     * created from this connection since the last commit or rollback. It is used to associate partial
+     * commit errors with the SQL statements invoked by the user.
+     * @see CommitException
+     * @see #incrementStatementExecutionCounter()
+     */
+    public int getStatementExecutionCounter() {
+        return statementExecutionCounter;
+    }
+    
+    public void incrementStatementExecutionCounter() {
+        statementExecutionCounter++;
+    }
 }

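The counter semantics in a nutshell, as a sketch (not part of the commit); conn is
assumed to be an open Phoenix JDBC connection and T an existing two-column table,
both hypothetical:

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

import org.apache.phoenix.jdbc.PhoenixConnection;

public class StatementCounterExample {
    static void showCounter(Connection conn) throws SQLException {
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        Statement stmt = conn.createStatement();
        stmt.execute("upsert into T values ('a', '1')"); // runs as statement index 0
        stmt.execute("upsert into T values ('b', '2')"); // runs as statement index 1
        System.out.println(pconn.getStatementExecutionCounter()); // prints 2
        conn.commit(); // commit (like rollback) resets the counter
        System.out.println(pconn.getStatementExecutionCounter()); // prints 0
    }
}
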
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java
index 25be8c0..a23484c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java
@@ -54,8 +54,8 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.schema.ExecuteQueryNotApplicableException;
 import org.apache.phoenix.schema.ExecuteUpdateNotApplicableException;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.Sequence;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.SQLCloseable;
 
@@ -79,8 +79,7 @@ public class PhoenixPreparedStatement extends PhoenixStatement implements Prepar
 
     private final String query;
 
-    public PhoenixPreparedStatement(PhoenixConnection connection, PhoenixStatementParser parser) throws SQLException,
-            IOException {
+    public PhoenixPreparedStatement(PhoenixConnection connection, PhoenixStatementParser parser) throws SQLException, IOException {
         super(connection);
         this.statement = parser.nextStatement(new ExecutableNodeFactory());
         if (this.statement == null) { throw new EOFException(); }
@@ -89,7 +88,7 @@ public class PhoenixPreparedStatement extends PhoenixStatement implements Prepar
         this.parameters = Arrays.asList(new Object[statement.getBindCount()]);
         Collections.fill(parameters, BindManager.UNBOUND_PARAMETER);
     }
-
+    
     public PhoenixPreparedStatement(PhoenixConnection connection, String query) throws SQLException {
         super(connection);
         this.query = query;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 4ca5bb5..c6d086a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -151,6 +151,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public class PhoenixStatement implements Statement, SQLCloseable, org.apache.phoenix.jdbc.Jdbc7Shim.Statement {
+	
     private static final Logger logger = LoggerFactory.getLogger(PhoenixStatement.class);
     
     public enum Operation {
@@ -243,6 +244,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
                         setLastResultSet(rs);
                         setLastUpdateCount(NO_UPDATE);
                         setLastUpdateOperation(stmt.getOperation());
+                        connection.incrementStatementExecutionCounter();
                         return rs;
                     } catch (RuntimeException e) {
                         // FIXME: Expression.evaluate does not throw SQLException
@@ -289,6 +291,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
                                 int lastUpdateCount = (int) Math.min(Integer.MAX_VALUE, state.getUpdateCount());
                                 setLastUpdateCount(lastUpdateCount);
                                 setLastUpdateOperation(stmt.getOperation());
+                                connection.incrementStatementExecutionCounter();
                                 return lastUpdateCount;
                             } catch (RuntimeException e) {
                                 // FIXME: Expression.evaluate does not throw SQLException

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
new file mode 100644
index 0000000..67c3353
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.execute;
+
+import static org.apache.phoenix.execute.MutationState.joinSortedIntArrays;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class MutationStateTest {
+
+    @Test
+    public void testJoinIntArrays() {
+        // simple case
+        int[] a = new int[] {1};
+        int[] b = new int[] {2};
+        int[] result = joinSortedIntArrays(a, b);
+        
+        assertEquals(2, result.length);
+        assertArrayEquals(new int[] {1,2}, result);
+        
+        // empty arrays
+        a = new int[0];
+        b = new int[0];
+        result = joinSortedIntArrays(a, b);
+        
+        assertEquals(0, result.length);
+        assertArrayEquals(new int[] {}, result);
+        
+        // dupes between arrays
+        a = new int[] {1,2,3};
+        b = new int[] {1,2,4};
+        result = joinSortedIntArrays(a, b);
+        
+        assertEquals(4, result.length);
+        assertArrayEquals(new int[] {1,2,3,4}, result);
+        
+        // dupes within arrays
+        a = new int[] {1,2,2,3};
+        b = new int[] {1,2,4};
+        result = joinSortedIntArrays(a, b);
+        
+        assertEquals(4, result.length);
+        assertArrayEquals(new int[] {1,2,3,4}, result);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58c782/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 9947440..b64eff8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -655,7 +655,7 @@ public abstract class BaseTest {
      * Create a {@link PhoenixTestDriver} and register it.
      * @return an initialized and registered {@link PhoenixTestDriver} 
      */
-    protected static PhoenixTestDriver initAndRegisterDriver(String url, ReadOnlyProps props) throws Exception {
+    public static PhoenixTestDriver initAndRegisterDriver(String url, ReadOnlyProps props) throws Exception {
         PhoenixTestDriver newDriver = new PhoenixTestDriver(props);
         DriverManager.registerDriver(newDriver);
         Driver oldDriver = DriverManager.getDriver(url); 


[24/50] [abbrv] phoenix git commit: PHOENIX-653 Support ANSI-standard date literals from SQL 2003

Posted by ma...@apache.org.
PHOENIX-653 Support ANSI-standard date literals from SQL 2003


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2d5913b8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2d5913b8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2d5913b8

Branch: refs/heads/calcite
Commit: 2d5913b80349179da5aa18a1abbb56c230ee0542
Parents: 11a76b2
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Feb 8 20:46:46 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Feb 9 18:37:14 2015 -0800

----------------------------------------------------------------------
 phoenix-core/src/main/antlr3/PhoenixSQL.g       | 17 ++++-
 .../expression/ArrayConstructorExpression.java  | 12 ++++
 .../phoenix/expression/LiteralExpression.java   | 17 +++--
 .../apache/phoenix/parse/ParseNodeFactory.java  | 14 ++++
 .../phoenix/schema/types/PArrayDataType.java    | 17 +++++
 .../apache/phoenix/schema/types/PBinary.java    | 15 ++--
 .../org/apache/phoenix/schema/types/PChar.java  | 14 ++--
 .../apache/phoenix/schema/types/PDataType.java  | 12 ++--
 .../org/apache/phoenix/schema/types/PDate.java  | 14 ++--
 .../apache/phoenix/schema/types/PDecimal.java   | 19 +++--
 .../org/apache/phoenix/schema/types/PTime.java  |  4 +-
 .../apache/phoenix/schema/types/PTimestamp.java |  4 +-
 .../phoenix/schema/types/PUnsignedDate.java     | 12 ++--
 .../phoenix/schema/types/PUnsignedTime.java     | 11 ++-
 .../schema/types/PUnsignedTimestamp.java        | 11 ++-
 .../apache/phoenix/schema/types/PVarbinary.java | 25 ++++---
 .../apache/phoenix/schema/types/PVarchar.java   | 15 ++--
 .../compile/StatementHintsCompilationTest.java  |  2 +-
 .../apache/phoenix/parse/QueryParserTest.java   | 76 ++++++++++++++++++++
 .../org/apache/phoenix/query/QueryPlanTest.java |  4 +-
 20 files changed, 242 insertions(+), 73 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index fad5fb3..cda93fe 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -152,6 +152,12 @@ import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.stats.StatisticsCollectionScope;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PTime;
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.apache.phoenix.schema.types.PUnsignedDate;
+import org.apache.phoenix.schema.types.PUnsignedTime;
+import org.apache.phoenix.schema.types.PUnsignedTimestamp;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.parse.LikeParseNode.LikeType;
 }
@@ -864,7 +870,9 @@ literal_or_bind returns [ParseNode ret]
 
 // Get a string, integer, double, date, boolean, or NULL value.
 literal returns [LiteralParseNode ret]
-    :   t=STRING_LITERAL { ret = factory.literal(t.getText()); }
+    :   t=STRING_LITERAL {
+            ret = factory.literal(t.getText()); 
+        }
     |   l=int_literal { ret = l; }
     |   l=long_literal { ret = l; }
     |   l=double_literal { ret = l; }
@@ -878,6 +886,13 @@ literal returns [LiteralParseNode ret]
     |   NULL {ret = factory.literal(null);}
     |   TRUE {ret = factory.literal(Boolean.TRUE);} 
     |   FALSE {ret = factory.literal(Boolean.FALSE);}
+    |   dt=identifier t=STRING_LITERAL { 
+            try {
+                ret = factory.literal(t.getText(), dt);
+            } catch (SQLException e) {
+                throw new RuntimeException(e);
+            }
+        }
     ;
     
 int_literal returns [LiteralParseNode ret]

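The new alternative accepts an identifier immediately followed by a string literal,
which is what enables the SQL 2003 style typed literals DATE '...', TIME '...' and
TIMESTAMP '...'. A rough usage sketch (not part of the commit; the connection and the
EVENTS table with a created column are hypothetical):

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class DateLiteralExample {
    static void queryWithAnsiLiterals(Connection conn) throws SQLException {
        Statement stmt = conn.createStatement();
        // The typed literals below are parsed by the new grammar alternative and
        // resolved to the named type via factory.literal(text, dt).
        ResultSet rs = stmt.executeQuery(
                "select * from EVENTS"
                + " where created >= DATE '2015-01-01 00:00:00'"
                + " and created < TIMESTAMP '2015-02-01 00:00:00'");
        while (rs.next()) {
            // process rows
        }
    }
}
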
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java
index 9b0ee8f..15cd14c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java
@@ -166,4 +166,16 @@ public class ArrayConstructorExpression extends BaseCompoundExpression {
         }
         return t;
     }
+    
+    @Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder(PArrayDataType.ARRAY_TYPE_SUFFIX + "[");
+        if (children.size()==0)
+            return buf.append("]").toString();
+        for (int i = 0; i < children.size() - 1; i++) {
+            buf.append(children.get(i) + ",");
+        }
+        buf.append(children.get(children.size()-1) + "]");
+        return buf.toString();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index e2bdc82..26c076c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -25,17 +25,17 @@ import java.sql.SQLException;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
-import org.apache.phoenix.schema.types.PChar;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.schema.types.PTime;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.types.PhoenixArray;
-import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.schema.TypeMismatchException;
-import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.StringUtil;
 
@@ -220,7 +220,14 @@ public class LiteralExpression extends BaseTerminalExpression {
     
     @Override
     public String toString() {
-        return value == null ? "null" : type.toStringLiteral(byteValue, null);
+        if (value == null) {
+            return "null";
+        }
+        // TODO: move into PDataType?
+        if (type.isCoercibleTo(PTimestamp.INSTANCE)) {
+            return type + " " + type.toStringLiteral(value, null);
+        }
+        return type.toStringLiteral(value, null);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 57507b8..c92dbb6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -48,6 +48,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TypeMismatchException;
 import org.apache.phoenix.schema.stats.StatisticsCollectionScope;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.collect.ListMultimap;
@@ -467,6 +468,19 @@ public class ParseNodeFactory {
             value = expectedType.toObject(value, actualType);
         }
         return new LiteralParseNode(value);
+        /*
+        Object typedValue = expectedType.toObject(value.toString());
+        return new LiteralParseNode(typedValue);
+        */
+    }
+
+    public LiteralParseNode literal(String value, String sqlTypeName) throws SQLException {
+        PDataType expectedType = sqlTypeName == null ? null : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(sqlTypeName));
+        if (expectedType == null || !expectedType.isCoercibleTo(PTimestamp.INSTANCE)) {
+            throw TypeMismatchException.newException(expectedType, PTimestamp.INSTANCE);
+        }
+        Object typedValue = expectedType.toObject(value);
+        return new LiteralParseNode(typedValue);
     }
 
     public LiteralParseNode coerce(LiteralParseNode literalNode, PDataType expectedType) throws SQLException {
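
A hedged sketch of the new factory method in isolation; instantiating ParseNodeFactory directly here is illustrative only. Only type names coercible to TIMESTAMP are accepted, as the tests below confirm:

    import org.apache.phoenix.parse.LiteralParseNode;
    import org.apache.phoenix.parse.ParseNodeFactory;

    public class TypedLiteralFactoryExample {
        public static void main(String[] args) throws Exception {
            ParseNodeFactory factory = new ParseNodeFactory();
            // DATE is coercible to TIMESTAMP, so this yields a date-valued literal node.
            LiteralParseNode node = factory.literal("2013-11-04 09:12:00", "DATE");
            System.out.println(node.getValue());
            // DECIMAL is not coercible to TIMESTAMP; this throws TypeMismatchException,
            // a SQLException with error code TYPE_MISMATCH.
            factory.literal("1.5", "DECIMAL");
        }
    }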

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
index 30fab95..c183b7a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.schema.types;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.text.Format;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -653,4 +654,20 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
         return instantiatePhoenixArray(baseType, array);
     }
 
+    @Override
+    public String toStringLiteral(Object o, Format formatter) {
+        StringBuilder buf = new StringBuilder(PArrayDataType.ARRAY_TYPE_SUFFIX + "[");
+        PhoenixArray array = (PhoenixArray)o;
+        PDataType baseType = PDataType.arrayBaseType(this);
+        int len = array.getDimensions();
+        if (len != 0)  {
+            for (int i = 0; i < len; i++) {
+                buf.append(baseType.toStringLiteral(array.getElement(i), null));
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+        }
+        buf.append(']');
+        return buf.toString();
+    }
 }
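
The override renders arrays as ARRAY[elem,...,elem], each element itself rendered via the base type's toStringLiteral (so strings come back quoted). A standalone toy with the same append-then-trim comma handling, assuming ARRAY_TYPE_SUFFIX is the string "ARRAY":

    public class ArrayLiteralSketch {
        static String toStringLiteral(Object[] elements) {
            StringBuilder buf = new StringBuilder("ARRAY[");
            if (elements.length != 0) {
                for (Object e : elements) {
                    buf.append(e).append(',');
                }
                buf.setLength(buf.length() - 1); // drop the trailing comma
            }
            return buf.append(']').toString();
        }

        public static void main(String[] args) {
            System.out.println(toStringLiteral(new Object[] { 1, 2, 3 })); // ARRAY[1,2,3]
            System.out.println(toStringLiteral(new Object[] {}));          // ARRAY[]
        }
    }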

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
index d188387..69d3796 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
@@ -17,15 +17,15 @@
  */
 package org.apache.phoenix.schema.types;
 
+import java.sql.Types;
+import java.text.Format;
+
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.ValueTypeIncompatibleException;
 import org.apache.phoenix.schema.SortOrder;
 
-import java.sql.Types;
-import java.text.Format;
-
 public class PBinary extends PDataType<byte[]> {
 
   public static final PBinary INSTANCE = new PBinary();
@@ -176,13 +176,18 @@ public class PBinary extends PDataType<byte[]> {
 
   @Override
   public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
-    if (formatter == null && b.length == 1) {
-      return Integer.toString(0xFF & b[0]);
+    if (length == 1) {
+      return Integer.toString(0xFF & b[offset]);
     }
     return PVarbinary.INSTANCE.toStringLiteral(b, offset, length, formatter);
   }
 
   @Override
+  public String toStringLiteral(Object o, Format formatter) {
+    return toStringLiteral((byte[])o, 0, ((byte[]) o).length, formatter);
+  }
+
+  @Override
   public Object getSampleValue(Integer maxLength, Integer arrayLength) {
     return PVarbinary.INSTANCE.getSampleValue(maxLength, arrayLength);
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
index 3100f89..aaee1ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
@@ -17,16 +17,17 @@
  */
 package org.apache.phoenix.schema.types;
 
-import com.google.common.base.Strings;
+import java.sql.Types;
+import java.text.Format;
+import java.util.Arrays;
+
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.ValueTypeIncompatibleException;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.StringUtil;
 
-import java.sql.Types;
-import java.text.Format;
-import java.util.Arrays;
+import com.google.common.base.Strings;
 
 /**
  * Fixed length single byte characters
@@ -197,6 +198,11 @@ public class PChar extends PDataType<String> {
     }
 
     @Override
+    public String toStringLiteral(Object o, Format formatter) {
+      return PVarchar.INSTANCE.toStringLiteral(o, formatter);
+    }
+
+    @Override
     public Object getSampleValue(Integer maxLength, Integer arrayLength) {
       return PVarchar.INSTANCE.getSampleValue(maxLength, arrayLength);
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
index 85e5711..8f46a3b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
@@ -1091,10 +1091,14 @@ public abstract class PDataType<T> implements DataType<T>, Comparable<PDataType<
 
   public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
     Object o = toObject(b, offset, length);
-    if (formatter != null) {
-      return formatter.format(o);
-    }
-    return o.toString();
+    return toStringLiteral(o, formatter);
+  }
+  
+  public String toStringLiteral(Object o, Format formatter) {
+      if (formatter != null) {
+          return formatter.format(o);
+        }
+        return o.toString();
   }
 
   private static final PhoenixArrayFactory DEFAULT_ARRAY_FACTORY = new PhoenixArrayFactory() {
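
For orientation, the refactor turns toStringLiteral into a small template: the byte-oriented overload decodes to an Object and funnels into a single Object-oriented method that subclasses override. A standalone sketch of the pattern, with an illustrative string-backed type:

    import java.nio.charset.StandardCharsets;
    import java.text.Format;

    public class TypeSketch {
        // Byte-oriented entry point funnels into the Object-oriented one, as in
        // the refactor above, so subclasses override only the Object variant.
        public String toStringLiteral(byte[] b, int offset, int length, Format f) {
            return toStringLiteral(toObject(b, offset, length), f);
        }

        public String toStringLiteral(Object o, Format f) {
            return f != null ? f.format(o) : o.toString();
        }

        Object toObject(byte[] b, int offset, int length) {
            return new String(b, offset, length, StandardCharsets.UTF_8);
        }

        public static void main(String[] args) {
            byte[] bytes = "abc".getBytes(StandardCharsets.UTF_8);
            System.out.println(new TypeSketch().toStringLiteral(bytes, 0, bytes.length, null)); // abc
        }
    }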

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
index bbd0a35..fa070d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
@@ -147,13 +147,13 @@ public class PDate extends PDataType<Date> {
   }
 
   @Override
-  public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
-    if (formatter == null || formatter == DateUtil.DEFAULT_DATE_FORMATTER) {
-      // If default formatter has not been overridden,
-      // use one that displays milliseconds.
-      formatter = DateUtil.DEFAULT_MS_DATE_FORMATTER;
-    }
-    return "'" + super.toStringLiteral(b, offset, length, formatter) + "'";
+  public String toStringLiteral(Object o, Format formatter) {
+      if (formatter == null) {
+          // If default formatter has not been overridden,
+          // use one that displays milliseconds.
+          formatter = DateUtil.DEFAULT_DATE_FORMATTER;
+        }
+        return "'" + super.toStringLiteral(o, formatter) + "'";
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
index 6b2dc84..e90491b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
@@ -17,7 +17,11 @@
  */
 package org.apache.phoenix.schema.types;
 
-import com.google.common.base.Preconditions;
+import java.math.BigDecimal;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.text.Format;
+
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.QueryConstants;
@@ -25,10 +29,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.NumberUtil;
 
-import java.math.BigDecimal;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.text.Format;
+import com.google.common.base.Preconditions;
 
 public class PDecimal extends PDataType<BigDecimal> {
 
@@ -390,6 +391,14 @@ public class PDecimal extends PDataType<BigDecimal> {
   }
 
   @Override
+  public String toStringLiteral(Object o, Format formatter) {
+      if (formatter == null) {
+          return ((BigDecimal)o).toPlainString();
+        }
+        return super.toStringLiteral(o, formatter);
+  }
+
+  @Override
   public Object getSampleValue(Integer maxLength, Integer arrayLength) {
     return new BigDecimal((Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength));
   }
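
toPlainString() matters here because BigDecimal.toString() can emit scientific notation, which does not survive as a SQL literal. A quick standalone check:

    import java.math.BigDecimal;

    public class DecimalLiteralCheck {
        public static void main(String[] args) {
            BigDecimal d = new BigDecimal("1E+3");
            System.out.println(d.toString());      // 1E+3  (not a valid literal)
            System.out.println(d.toPlainString()); // 1000
        }
    }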

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
index 81cbaff..0cfb0e8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
@@ -127,11 +127,11 @@ public class PTime extends PDataType<Time> {
   }
 
   @Override
-  public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
+  public String toStringLiteral(Object o, Format formatter) {
       if (formatter == null) {
           formatter = DateUtil.DEFAULT_TIME_FORMATTER;
         }
-        return "'" + super.toStringLiteral(b, offset, length, formatter) + "'";
+        return "'" + super.toStringLiteral(o, formatter) + "'";
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
index 8182e33..9a82cc0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
@@ -180,11 +180,11 @@ public class PTimestamp extends PDataType<Timestamp> {
   }
 
   @Override
-  public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
+  public String toStringLiteral(Object o, Format formatter) {
       if (formatter == null) {
           formatter = DateUtil.DEFAULT_TIMESTAMP_FORMATTER;
         }
-        return "'" + super.toStringLiteral(b, offset, length, formatter) + "'";
+        return "'" + super.toStringLiteral(o, formatter) + "'";
   }
 
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java
index 8b63fbb..a6b1bc3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java
@@ -17,14 +17,14 @@
  */
 package org.apache.phoenix.schema.types;
 
+import java.sql.Date;
+import java.sql.Types;
+import java.text.Format;
+
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.DateUtil;
 
-import java.sql.Types;
-import java.sql.Date;
-import java.text.Format;
-
 public class PUnsignedDate extends PDataType<Date> {
 
   public static final PUnsignedDate INSTANCE = new PUnsignedDate();
@@ -109,14 +109,14 @@ public class PUnsignedDate extends PDataType<Date> {
   }
 
   @Override
-  public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
+  public String toStringLiteral(Object o, Format formatter) {
     // Can't delegate, as the super.toStringLiteral calls this.toBytes
     if (formatter == null || formatter == DateUtil.DEFAULT_DATE_FORMATTER) {
       // If default formatter has not been overridden,
       // use one that displays milliseconds.
       formatter = DateUtil.DEFAULT_MS_DATE_FORMATTER;
     }
-    return "'" + super.toStringLiteral(b, offset, length, formatter) + "'";
+    return "'" + super.toStringLiteral(o, formatter) + "'";
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java
index f738f44..4173be1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java
@@ -17,13 +17,13 @@
  */
 package org.apache.phoenix.schema.types;
 
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.schema.SortOrder;
-
 import java.sql.Time;
 import java.sql.Types;
 import java.text.Format;
 
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.SortOrder;
+
 public class PUnsignedTime extends PDataType<Time> {
 
   public static final PUnsignedTime INSTANCE = new PUnsignedTime();
@@ -103,6 +103,11 @@ public class PUnsignedTime extends PDataType<Time> {
   }
 
   @Override
+  public String toStringLiteral(Object o, Format formatter) {
+    return PUnsignedDate.INSTANCE.toStringLiteral(o, formatter);
+  }
+
+  @Override
   public int getResultSetSqlType() {
     return Types.TIME;
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java
index c13de56..450408f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java
@@ -114,14 +114,11 @@ public class PUnsignedTimestamp extends PDataType<Timestamp> {
   }
 
   @Override
-  public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
-    java.sql.Timestamp value = (java.sql.Timestamp) toObject(b, offset, length);
-    if (formatter == null || formatter == DateUtil.DEFAULT_DATE_FORMATTER) {
-      // If default formatter has not been overridden,
-      // use one that displays milliseconds.
-      formatter = DateUtil.DEFAULT_MS_DATE_FORMATTER;
+  public String toStringLiteral(Object o, Format formatter) {
+    if (formatter == null) {
+      formatter = DateUtil.DEFAULT_TIMESTAMP_FORMATTER;
     }
-    return "'" + super.toStringLiteral(b, offset, length, formatter) + "." + value.getNanos() + "'";
+    return "'" + super.toStringLiteral(o, formatter) + "'";
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java
index 6ba4dc4..bb1d4c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java
@@ -17,15 +17,15 @@
  */
 package org.apache.phoenix.schema.types;
 
+import java.sql.Types;
+import java.text.Format;
+
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.ByteUtil;
 
-import java.sql.Types;
-import java.text.Format;
-
 public class PVarbinary extends PDataType<byte[]> {
 
   public static final PVarbinary INSTANCE = new PVarbinary();
@@ -148,20 +148,25 @@ public class PVarbinary extends PDataType<byte[]> {
 
   @Override
   public String toStringLiteral(byte[] b, int o, int length, Format formatter) {
-    if (formatter != null) {
-      return formatter.format(b);
-    }
     StringBuilder buf = new StringBuilder();
     buf.append('[');
-    for (int i = 0; i < b.length; i++) {
-      buf.append(0xFF & b[i]);
-      buf.append(',');
+    if (length > 0) {
+        for (int i = o; i < length; i++) {
+          buf.append(0xFF & b[i]);
+          buf.append(',');
+        }
+        buf.setLength(buf.length()-1);
     }
-    buf.setCharAt(buf.length() - 1, ']');
+    buf.append(']');
     return buf.toString();
   }
 
   @Override
+  public String toStringLiteral(Object o, Format formatter) {
+      return toStringLiteral((byte[])o, 0, ((byte[]) o).length, formatter);
+  }
+  
+  @Override
   public Object getSampleValue(Integer maxLength, Integer arrayLength) {
     int length = maxLength != null && maxLength > 0 ? maxLength : 1;
     byte[] b = new byte[length];

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java
index 9ecfb4e..6956942 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java
@@ -17,14 +17,15 @@
  */
 package org.apache.phoenix.schema.types;
 
-import com.google.common.base.Preconditions;
+import java.sql.Types;
+import java.text.Format;
+
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.ByteUtil;
 
-import java.sql.Types;
-import java.text.Format;
+import com.google.common.base.Preconditions;
 
 public class PVarchar extends PDataType<String> {
 
@@ -137,15 +138,11 @@ public class PVarchar extends PDataType<String> {
   }
 
   @Override
-  public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
-    while (b[length - 1] == 0) {
-      length--;
-    }
+  public String toStringLiteral(Object o, Format formatter) {
     if (formatter != null) {
-      Object o = toObject(b, offset, length);
       return "'" + formatter.format(o) + "'";
     }
-    return "'" + Bytes.toStringBinary(b, offset, length) + "'";
+    return "'" + o + "'";
   }
 
   private char[] sampleChars = new char[1];

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java
index 13e2860..7f8adfa 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java
@@ -99,7 +99,7 @@ public class StatementHintsCompilationTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("create table eh (organization_id char(15) not null,parent_id char(15) not null, created_date date not null, entity_history_id char(15) not null constraint pk primary key (organization_id, parent_id, created_date, entity_history_id))");
         ResultSet rs = conn.createStatement().executeQuery("explain select /*+ RANGE_SCAN */ ORGANIZATION_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID from eh where ORGANIZATION_ID='111111111111111' and SUBSTR(PARENT_ID, 1, 3) = 'foo' and CREATED_DATE >= TO_DATE ('2012-11-01 00:00:00') and CREATED_DATE < TO_DATE ('2012-11-30 00:00:00') order by ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID limit 100");
         assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER EH ['111111111111111','foo            ','2012-11-01 00:00:00.000'] - ['111111111111111','fop            ','2012-11-30 00:00:00.000']\n" + 
-                "    SERVER FILTER BY FIRST KEY ONLY AND (CREATED_DATE >= '2012-11-01 00:00:00.000' AND CREATED_DATE < '2012-11-30 00:00:00.000')\n" + 
+                "    SERVER FILTER BY FIRST KEY ONLY AND (CREATED_DATE >= DATE '2012-11-01 00:00:00.000' AND CREATED_DATE < DATE '2012-11-30 00:00:00.000')\n" + 
                 "    SERVER TOP 100 ROWS SORTED BY [ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID]\n" + 
                 "CLIENT MERGE SORT",QueryUtil.getExplainPlan(rs));
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index 201172b..866365a 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -677,4 +677,80 @@ public class QueryParserTest {
                 new StringReader("select * from date_test where d in (to_date('2013-11-04 09:12:00'))"));
         parser.parseStatement();
     }
+    
+    @Test
+    public void testDateLiteral() throws Exception {
+        SQLParser parser = new SQLParser(
+                new StringReader(
+                        "select * from t where d = DATE '2013-11-04 09:12:00'"));
+        parser.parseStatement();
+    }
+
+    @Test
+    public void testTimeLiteral() throws Exception {
+        SQLParser parser = new SQLParser(
+                new StringReader(
+                        "select * from t where d = TIME '2013-11-04 09:12:00'"));
+        parser.parseStatement();
+    }
+
+
+    @Test
+    public void testTimestampLiteral() throws Exception {
+        SQLParser parser = new SQLParser(
+                new StringReader(
+                        "select * from t where d = TIMESTAMP '2013-11-04 09:12:00'"));
+        parser.parseStatement();
+    }
+    
+    @Test
+    public void testUnsignedDateLiteral() throws Exception {
+        SQLParser parser = new SQLParser(
+                new StringReader(
+                        "select * from t where d = UNSIGNED_DATE '2013-11-04 09:12:00'"));
+        parser.parseStatement();
+    }
+
+    @Test
+    public void testUnsignedTimeLiteral() throws Exception {
+        SQLParser parser = new SQLParser(
+                new StringReader(
+                        "select * from t where d = UNSIGNED_TIME '2013-11-04 09:12:00'"));
+        parser.parseStatement();
+    }
+
+
+    @Test
+    public void testUnsignedTimestampLiteral() throws Exception {
+        SQLParser parser = new SQLParser(
+                new StringReader(
+                        "select * from t where d = UNSIGNED_TIMESTAMP '2013-11-04 09:12:00'"));
+        parser.parseStatement();
+    }
+    
+    @Test
+    public void testUnknownLiteral() throws Exception {
+        SQLParser parser = new SQLParser(
+                new StringReader(
+                        "select * from t where d = FOO '2013-11-04 09:12:00'"));
+        try {
+            parser.parseStatement();
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), e.getErrorCode());
+        }
+    }
+    
+    @Test
+    public void testUnsupportedLiteral() throws Exception {
+        SQLParser parser = new SQLParser(
+                new StringReader(
+                        "select * from t where d = DECIMAL '2013-11-04 09:12:00'"));
+        try {
+            parser.parseStatement();
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode());
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d5913b8/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
index 6139aa5..7ad3e25 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
@@ -52,12 +52,12 @@ public class QueryPlanTest extends BaseConnectionlessQueryTest {
 
                 "SELECT host FROM PTSDB WHERE inst IS NULL AND host IS NOT NULL AND date >= to_date('2013-01-01')",
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [null,not null]\n" + 
-                "    SERVER FILTER BY FIRST KEY ONLY AND DATE >= '2013-01-01 00:00:00.000'",
+                "    SERVER FILTER BY FIRST KEY ONLY AND DATE >= DATE '2013-01-01 00:00:00.000'",
 
                 // Since inst IS NOT NULL is unbounded, we won't continue optimizing
                 "SELECT host FROM PTSDB WHERE inst IS NOT NULL AND host IS NULL AND date >= to_date('2013-01-01')",
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [not null]\n" + 
-                "    SERVER FILTER BY FIRST KEY ONLY AND (HOST IS NULL AND DATE >= '2013-01-01 00:00:00.000')",
+                "    SERVER FILTER BY FIRST KEY ONLY AND (HOST IS NULL AND DATE >= DATE '2013-01-01 00:00:00.000')",
 
                 "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id = '000000000000002' AND x_integer = 2 AND a_integer < 5 ",
                 "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER ATABLE\n" + 


[41/50] [abbrv] phoenix git commit: PHOENIX-1686 Data length should be checked in KeyValueSchema.next() and maxOffset should be set correctly by the caller

Posted by ma...@apache.org.
PHOENIX-1686 Data length should be checked in KeyValueSchema.next() and maxOffset should be set correctly by the caller


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/93f56057
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/93f56057
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/93f56057

Branch: refs/heads/calcite
Commit: 93f560575319b7f26f5fc5db618bc2d8a09be930
Parents: 569469a
Author: maryannxue <we...@intel.com>
Authored: Fri Feb 27 16:43:13 2015 -0500
Committer: maryannxue <we...@intel.com>
Committed: Fri Feb 27 16:43:13 2015 -0500

----------------------------------------------------------------------
 .../phoenix/expression/ProjectedColumnExpression.java |  2 +-
 .../org/apache/phoenix/schema/KeyValueSchema.java     | 14 +++++++++-----
 2 files changed, 10 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/93f56057/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
index d090203..97d1aff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
@@ -106,9 +106,9 @@ public class ProjectedColumnExpression extends ColumnExpression {
         try {
             KeyValueSchema schema = getSchema();
             TupleProjector.decodeProjectedValue(tuple, ptr);
-            int maxOffset = ptr.getOffset() + ptr.getLength();
             bitSet.clear();
             bitSet.or(ptr);
+            int maxOffset = ptr.getOffset() + ptr.getLength() - bitSet.getEstimatedLength();
             schema.iterator(ptr, position, bitSet);
             Boolean hasValue = schema.next(ptr, position, maxOffset, bitSet);
             if (hasValue == null || !hasValue.booleanValue())

http://git-wip-us.apache.org/repos/asf/phoenix/blob/93f56057/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java
index 595103f..1ab8c86 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java
@@ -22,6 +22,8 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.http.annotation.Immutable;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
@@ -203,12 +205,14 @@ public class KeyValueSchema extends ValueSchema {
         ptr.set(ptr.get(), ptr.getOffset() + ptr.getLength(), 0);
         if (!isNull(position, valueSet)) {
             Field field = this.getField(position);
-            if (field.getDataType().isFixedWidth()) {
-                ptr.set(ptr.get(),ptr.getOffset(), field.getByteSize());
-            } else {
-                int length = ByteUtil.vintFromBytes(ptr);
-                ptr.set(ptr.get(),ptr.getOffset(),length);
+            int length = field.getDataType().isFixedWidth() ? 
+                    field.getByteSize() : ByteUtil.vintFromBytes(ptr);
+            if (ptr.getOffset() + length > maxOffset) {
+                throw new RuntimeException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA)
+                    .setMessage("Expected length of at least " + length + " bytes, but had " + (maxOffset
+                                    - ptr.getOffset())).build().buildException());
             }
+            ptr.set(ptr.get(),ptr.getOffset(),length);
             return ptr.getLength() > 0;
         }
         return false;
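
Taken together, the two hunks have the caller pass the true end of the field data (the projected value minus its trailing bit set) and have next() verify each field's declared length against that bound. A standalone sketch of the check with illustrative names; the real code throws a RuntimeException wrapping an ILLEGAL_DATA SQLException:

    public class BoundsCheckSketch {
        // Returns the field length if data[offset .. offset+length) lies within
        // maxOffset; otherwise fails the way the patch does, instead of silently
        // reading into the serialized bit set (or past the buffer).
        static int next(int offset, int length, int maxOffset) {
            if (offset + length > maxOffset) {
                throw new IllegalStateException("Expected length of at least " + length
                    + " bytes, but had " + (maxOffset - offset));
            }
            return length;
        }

        public static void main(String[] args) {
            System.out.println(next(0, 4, 8)); // ok: 4
            next(6, 4, 8);                     // throws: only 2 bytes remain
        }
    }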


[21/50] [abbrv] phoenix git commit: PHOENIX-1646 Views and functional index expressions may lose information when stringified

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/OuterJoinParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/OuterJoinParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/OuterJoinParseNode.java
deleted file mode 100644
index 97f636b..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/OuterJoinParseNode.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.parse;
-
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.List;
-
-
-
-/**
- * 
- * Node representing an outer join qualifier (+) in SQL
- * TODO: remove Oracle specific syntax
- *
- * 
- * @since 0.1
- */
-public class OuterJoinParseNode extends UnaryParseNode{
-    OuterJoinParseNode(ParseNode node) {
-        super(node);
-    }
-
-    @Override
-    public <T> T accept(ParseNodeVisitor<T> visitor) throws SQLException {
-        List<T> l = Collections.emptyList();
-        if (visitor.visitEnter(this)) {
-            l = acceptChildren(visitor);
-        }
-        return visitor.visitLeave(this, l);
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNode.java
index 2ee8a83..b32674e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNode.java
@@ -20,6 +20,8 @@ package org.apache.phoenix.parse;
 import java.sql.SQLException;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 
@@ -47,4 +49,13 @@ public abstract class ParseNode {
     public String getAlias() {
         return null;
     }
+    
+    @Override
+    public final String toString() {
+        StringBuilder buf = new StringBuilder();
+        toSQL(null, buf);
+        return buf.toString();
+    }
+    
+    public abstract void toSQL(ColumnResolver resolver, StringBuilder buf);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index c92dbb6..ddfaa03 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.parse;
 
 import java.lang.reflect.Constructor;
 import java.sql.SQLException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -52,6 +51,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.collect.ListMultimap;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 /**
@@ -391,12 +391,22 @@ public class ParseNodeFactory {
     public FunctionParseNode function(String name, List<ParseNode> valueNodes,
             List<ParseNode> columnNodes, boolean isAscending) {
 
-        List<ParseNode> children = new ArrayList<ParseNode>();
-        children.addAll(columnNodes);
-        children.add(new LiteralParseNode(Boolean.valueOf(isAscending)));
-        children.addAll(valueNodes);
+        List<ParseNode> args = Lists.newArrayListWithExpectedSize(columnNodes.size() + valueNodes.size() + 1);
+        args.addAll(columnNodes);
+        args.add(new LiteralParseNode(Boolean.valueOf(isAscending)));
+        args.addAll(valueNodes);
 
-        return function(name, children);
+        BuiltInFunctionInfo info = getInfo(name, args);
+        Constructor<? extends FunctionParseNode> ctor = info.getNodeCtor();
+        if (ctor == null) {
+            return new AggregateFunctionWithinGroupParseNode(name, args, info);
+        } else {
+            try {
+                return ctor.newInstance(name, args, info);
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
     }
 
     public HintNode hint(String hint) {
@@ -561,8 +571,12 @@ public class ParseNodeFactory {
     	return new ArrayConstructorNode(upsertStmtArray);
     }
 
-    public MultiplyParseNode negate(ParseNode child) {
-        return new MultiplyParseNode(Arrays.asList(child,this.literal(-1)));
+    public ParseNode negate(ParseNode child) {
+        // Prevents reparsing of -1 from becoming 1*-1 and 1*1*-1 with each re-parsing
+        if (LiteralParseNode.ONE.equals(child)) {
+            return LiteralParseNode.MINUS_ONE;
+        }
+        return new MultiplyParseNode(Arrays.asList(child,LiteralParseNode.MINUS_ONE));
     }
 
     public NotEqualParseNode notEqual(ParseNode lhs, ParseNode rhs) {
@@ -588,10 +602,6 @@ public class ParseNodeFactory {
     }
 
 
-    public OuterJoinParseNode outer(ParseNode node) {
-        return new OuterJoinParseNode(node);
-    }
-
     public SelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select, ParseNode where,
             List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate, boolean hasSequence) {
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java
index 87038c7..3d6d7f1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 /**
  * 
  * Node representing a row value constructor in SQL.  
@@ -43,4 +45,18 @@ public class RowValueConstructorParseNode extends CompoundParseNode {
         return visitor.visitLeave(this, l);
     }
 
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        List<ParseNode> children = getChildren();
+        buf.append(' ');
+        buf.append('(');
+        if (!children.isEmpty()) {
+            for (ParseNode child : children) {
+                child.toSQL(resolver, buf);
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+        }
+        buf.append(')');
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
index 961846b..71cabd6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
@@ -21,6 +21,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.expression.function.CountAggregateFunction;
 import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
 import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
@@ -93,6 +94,104 @@ public class SelectStatement implements FilterableStatement {
     private final boolean isAggregate;
     private final boolean hasSequence;
     
+    @Override
+    public final String toString() {
+        StringBuilder buf = new StringBuilder();
+        toSQL(null,buf);
+        return buf.toString();
+    }
+
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append("SELECT ");
+        if (hint != null) buf.append(hint);
+        if (isDistinct) buf.append("DISTINCT ");
+        for (AliasedNode selectNode : select) {
+            selectNode.toSQL(resolver, buf);
+            buf.append(',');
+        }
+        buf.setLength(buf.length()-1);
+        buf.append(" FROM ");
+        fromTable.toSQL(resolver, buf);
+        if (where != null) {
+            buf.append(" WHERE ");
+            where.toSQL(resolver, buf);
+        }
+        if (!groupBy.isEmpty()) {
+            buf.append(" GROUP BY ");
+            for (ParseNode node : groupBy) {
+                node.toSQL(resolver, buf);
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+        }
+        if (having != null) {
+            buf.append(" HAVING ");
+            having.toSQL(resolver, buf);            
+        }
+        if (!orderBy.isEmpty()) {
+            buf.append(" ORDER BY ");
+            for (OrderByNode node : orderBy) {
+                node.toSQL(resolver, buf);
+                buf.append(',');
+            }
+            buf.setLength(buf.length()-1);
+        }
+        if (limit != null) {
+            buf.append(" LIMIT " + limit.toString());
+        }
+    }    
+
+    
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((fromTable == null) ? 0 : fromTable.hashCode());
+        result = prime * result + ((groupBy == null) ? 0 : groupBy.hashCode());
+        result = prime * result + ((having == null) ? 0 : having.hashCode());
+        result = prime * result + ((hint == null) ? 0 : hint.hashCode());
+        result = prime * result + (isDistinct ? 1231 : 1237);
+        result = prime * result + ((limit == null) ? 0 : limit.hashCode());
+        result = prime * result + ((orderBy == null) ? 0 : orderBy.hashCode());
+        result = prime * result + ((select == null) ? 0 : select.hashCode());
+        result = prime * result + ((where == null) ? 0 : where.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        SelectStatement other = (SelectStatement)obj;
+        if (fromTable == null) {
+            if (other.fromTable != null) return false;
+        } else if (!fromTable.equals(other.fromTable)) return false;
+        if (groupBy == null) {
+            if (other.groupBy != null) return false;
+        } else if (!groupBy.equals(other.groupBy)) return false;
+        if (having == null) {
+            if (other.having != null) return false;
+        } else if (!having.equals(other.having)) return false;
+        if (hint == null) {
+            if (other.hint != null) return false;
+        } else if (!hint.equals(other.hint)) return false;
+        if (isDistinct != other.isDistinct) return false;
+        if (limit == null) {
+            if (other.limit != null) return false;
+        } else if (!limit.equals(other.limit)) return false;
+        if (orderBy == null) {
+            if (other.orderBy != null) return false;
+        } else if (!orderBy.equals(other.orderBy)) return false;
+        if (select == null) {
+            if (other.select != null) return false;
+        } else if (!select.equals(other.select)) return false;
+        if (where == null) {
+            if (other.where != null) return false;
+        } else if (!where.equals(other.where)) return false;
+        return true;
+    }
+
     // Count constant expressions
     private static int countConstants(List<ParseNode> nodes) {
         int count = 0;
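
With toSQL() in place, toString() regenerates SQL from the parse tree rather than losing it. A minimal sketch, assuming SQLParser is driven the same way as in the tests elsewhere in this commit; the table and columns are hypothetical:

    import java.io.StringReader;
    import org.apache.phoenix.parse.SQLParser;

    public class SelectRoundTrip {
        public static void main(String[] args) throws Exception {
            SQLParser parser = new SQLParser(new StringReader(
                "SELECT host FROM ptsdb WHERE inst IS NULL ORDER BY host LIMIT 10"));
            // SelectStatement.toString() now rebuilds the SQL text via toSQL(null, buf).
            System.out.println(parser.parseStatement());
        }
    }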

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java
index 260584f..a5d60fe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java
@@ -20,6 +20,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 public class SequenceValueParseNode extends TerminalParseNode {
     public enum Op {
@@ -89,4 +91,12 @@ public class SequenceValueParseNode extends TerminalParseNode {
 			return false;
 		return true;
 	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(' ');
+        buf.append(op.getName());
+        buf.append(" VALUE FOR ");
+        buf.append(tableName);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java
index 3fd27de..5eba979 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java
@@ -21,6 +21,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 
@@ -46,4 +48,16 @@ public class StringConcatParseNode extends CompoundParseNode {
         return visitor.visitLeave(this, l);
     }
     
+    
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append('(');
+        List<ParseNode> children = getChildren();
+        children.get(0).toSQL(resolver, buf);
+        for (int i = 1 ; i < children.size(); i++) {
+            buf.append(" || ");
+            children.get(i).toSQL(resolver, buf);
+        }
+        buf.append(')');
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java
index b7bcb64..d73958e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java
@@ -19,6 +19,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -78,4 +80,10 @@ public class SubqueryParseNode extends TerminalParseNode {
 		return true;
 	}
     
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append('(');
+        select.toSQL(resolver, buf);
+        buf.append(')');
+    }    
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java
index 01e6654..decc3ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java
@@ -31,6 +31,13 @@ import java.util.List;
  * @since 0.1
  */
 public class SubtractParseNode extends ArithmeticParseNode {
+    public static final String OPERATOR = "-";
+
+    @Override
+    public String getOperator() {
+        return OPERATOR;
+    }
+
     SubtractParseNode(List<ParseNode> children) {
         super(children);
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
index 654e899..61bfa6b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableName.java
@@ -60,7 +60,9 @@ public class TableName {
     
     @Override
     public String toString() {
-        return (schemaName == null ? "" : schemaName + QueryConstants.NAME_SEPARATOR)  + tableName;
+        return (schemaName == null ? "" : ((isSchemaNameCaseSensitive ? "\"" : "") + schemaName
+                + (isSchemaNameCaseSensitive ? "\"" : "") + QueryConstants.NAME_SEPARATOR))
+                + ((isTableNameCaseSensitive ? "\"" : "") + tableName + (isTableNameCaseSensitive ? "\"" : ""));
     }
     
 	@Override
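
An illustrative mirror of the quoting logic above: names flagged case-sensitive come back wrapped in double quotes so they survive a re-parse (the real code uses QueryConstants.NAME_SEPARATOR rather than a literal dot):

    public class TableNameQuoting {
        static String stringify(String schemaName, boolean schemaCaseSensitive,
                                String tableName, boolean tableCaseSensitive) {
            String schema = schemaName == null ? ""
                    : (schemaCaseSensitive ? "\"" + schemaName + "\"" : schemaName) + ".";
            return schema + (tableCaseSensitive ? "\"" + tableName + "\"" : tableName);
        }

        public static void main(String[] args) {
            System.out.println(stringify("mySchema", true, "T", false)); // "mySchema".T
            System.out.println(stringify(null, false, "myTable", true)); // "myTable"
        }
    }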

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableNode.java
index 7ab8d0c..7c37234 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableNode.java
@@ -19,6 +19,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -39,6 +41,14 @@ public abstract class TableNode {
         return alias;
     }
 
+    @Override
+    public final String toString() {
+        StringBuilder buf = new StringBuilder();
+        toSQL(null,buf);
+        return buf.toString();
+    }
+
     public abstract <T> T accept(TableNodeVisitor<T> visitor) throws SQLException;
+    public abstract void toSQL(ColumnResolver resolver, StringBuilder buf);
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java
index 7292347..7c7f416 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java
@@ -19,6 +19,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 public class TableWildcardParseNode extends NamedParseNode {
     private final TableName tableName;
     private final boolean isRewrite;
@@ -75,5 +77,10 @@ public class TableWildcardParseNode extends NamedParseNode {
 		return true;
 	}
 
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        toSQL(buf);
+        buf.append(".*");
+    }
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java
index fdfb64f..9922c3f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java
@@ -19,6 +19,8 @@ package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
 
+import org.apache.phoenix.compile.ColumnResolver;
+
 
 
 /**
@@ -44,11 +46,6 @@ public class WildcardParseNode extends TerminalParseNode {
         return visitor.visit(this);
     }
 
-    @Override
-    public String toString() {
-        return NAME;
-    }
-
     public boolean isRewrite() {
         return isRewrite;
     }
@@ -73,6 +70,13 @@ public class WildcardParseNode extends TerminalParseNode {
 		if (isRewrite != other.isRewrite)
 			return false;
 		return true;
-	}    
+	}
+
+    @Override
+    public void toSQL(ColumnResolver resolver, StringBuilder buf) {
+        buf.append(' ');
+        buf.append(NAME);
+        buf.append(' ');
+    }    
     
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index fceb724..2722cb6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1002,6 +1002,12 @@ public class MetaDataClient {
                     }
                     unusedPkColumns.remove(expression);
                     
+                    // Go through the parse node to get the string, as otherwise
+                    // we can lose information during compilation
+                    StringBuilder buf = new StringBuilder();
+                    parseNode.toSQL(resolver, buf);
+                    String expressionStr = buf.toString();
+                    
                     ColumnName colName = null;
                     ColumnRef colRef = expressionIndexCompiler.getColumnRef();
 					if (colRef!=null) { 
@@ -1013,13 +1019,13 @@ public class MetaDataClient {
 					else { 
 						// if this is an expression
 					    // TODO column names cannot have double quotes, remove this once this PHOENIX-1621 is fixed
-						String name = expression.toString().replaceAll("\"", "'");
+						String name = expressionStr.replaceAll("\"", "'");
                         colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name));
 					}
 					indexedColumnNames.add(colName);
                 	PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
                     allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, pair.getSecond()));
-                    columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expression.toString()));
+                    columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expressionStr));
                 }
 
                 // Next all the PK columns from the data table that aren't indexed
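
The hunk above stores the indexed expression's SQL regenerated from the parse
node rather than from the compiled Expression, since compilation can normalize
away details (quoting, literal formatting) that must survive in the index
column name. A hedged sketch of the functional-index DDL this path serves
(URL, table and index names hypothetical; java.sql imports assumed):

    // Hypothetical functional index whose expression string must round-trip.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
        stmt.execute("CREATE TABLE t (k VARCHAR PRIMARY KEY, v VARCHAR)");
        stmt.execute("CREATE INDEX idx ON t (UPPER(v))"); // stored via parseNode.toSQL(...)
    }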

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
index fa070d3..b926afb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.StringUtil;
 
 public class PDate extends PDataType<Date> {
 
@@ -150,10 +151,10 @@ public class PDate extends PDataType<Date> {
   public String toStringLiteral(Object o, Format formatter) {
       if (formatter == null) {
           // If default formatter has not been overridden,
-          // use one that displays milliseconds.
+          // use the default one.
           formatter = DateUtil.DEFAULT_DATE_FORMATTER;
         }
-        return "'" + super.toStringLiteral(o, formatter) + "'";
+        return "'" + StringUtil.escapeStringConstant(super.toStringLiteral(o, formatter)) + "'";
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java
index 6956942..9883e12 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarchar.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.StringUtil;
 
 import com.google.common.base.Preconditions;
 
@@ -142,7 +143,7 @@ public class PVarchar extends PDataType<String> {
     if (formatter != null) {
       return "'" + formatter.format(o) + "'";
     }
-    return "'" + o + "'";
+    return "'" + StringUtil.escapeStringConstant(o.toString()) + "'";
   }
 
   private char[] sampleChars = new char[1];

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index c058eb8..31b6350 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -453,8 +453,11 @@ public class IndexUtil {
         };
         ColumnResolver indexResolver = FromCompiler.getResolver(indexTableRef);
         StatementContext context = new StatementContext(statement, indexResolver);
-        Expression whereClause = WhereCompiler.compile(context, whereNode);
-        return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(), whereClause);
+        // Compile to ensure validity
+        WhereCompiler.compile(context, whereNode);
+        StringBuilder buf = new StringBuilder();
+        whereNode.toSQL(indexResolver, buf);
+        return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(), buf.toString());
     }
     
     public static void wrapResultUsingOffset(final ObserverContext<RegionCoprocessorEnvironment> c,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index 88b68b0..e0b4c2e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -34,9 +34,9 @@ import javax.annotation.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.parse.WildcardParseNode;
@@ -76,6 +76,19 @@ public final class QueryUtil {
     private static final String SELECT = "SELECT";
     private static final String FROM = "FROM";
     private static final String WHERE = "WHERE";
+    private static final String[] CompareOpString = new String[CompareOp.values().length];
+    static {
+        CompareOpString[CompareOp.EQUAL.ordinal()] = "=";
+        CompareOpString[CompareOp.NOT_EQUAL.ordinal()] = "!=";
+        CompareOpString[CompareOp.GREATER.ordinal()] = ">";
+        CompareOpString[CompareOp.LESS.ordinal()] = "<";
+        CompareOpString[CompareOp.GREATER_OR_EQUAL.ordinal()] = ">=";
+        CompareOpString[CompareOp.LESS_OR_EQUAL.ordinal()] = "<=";
+    }
+
+    public static String toSQL(CompareOp op) {
+        return CompareOpString[op.ordinal()];
+    }
     
     /**
      * Private constructor
@@ -262,11 +275,11 @@ public final class QueryUtil {
         return getUrl(server, port);
     }
     
-    public static String getViewStatement(String schemaName, String tableName, Expression whereClause) {
+    public static String getViewStatement(String schemaName, String tableName, String where) {
         // Only form we currently support for VIEWs: SELECT * FROM t WHERE ...
         return SELECT + " " + WildcardParseNode.NAME + " " + FROM + " " +
                 (schemaName == null || schemaName.length() == 0 ? "" : ("\"" + schemaName + "\".")) +
                 ("\"" + tableName + "\" ") +
-                (WHERE + " " + whereClause.toString());
+                (WHERE + " " + where);
     }
 }
\ No newline at end of file
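
A hedged usage sketch of the two additions above (assumes WildcardParseNode.NAME
renders as "*"; results shown in comments):

    String op = QueryUtil.toSQL(CompareOp.GREATER_OR_EQUAL);        // ">="
    String view = QueryUtil.getViewStatement("S", "T", "K " + op + " 10");
    // view: SELECT * FROM "S"."T" WHERE K >= 10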

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
index d65af15..a83098a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.util;
 
 import java.util.Arrays;
 
+import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.UndecodableByteException;
 import org.apache.phoenix.schema.SortOrder;
@@ -325,5 +326,9 @@ public class StringUtil {
         if (toIndex > length) {
             throw new ArrayIndexOutOfBoundsException(toIndex);
         }
+    }
+
+    public static String escapeStringConstant(String pattern) {
+        return StringEscapeUtils.escapeSql(pattern); // Note: escapeSql only doubles single quotes; double quotes are not escaped here
     }   
 }
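
For reference, escapeSql from commons-lang 2.x doubles embedded single quotes,
which is the escaping SQL string literals require; a small sketch of the effect:

    StringUtil.escapeStringConstant("O'Brien");   // returns O''Brien
    // PVarchar.toStringLiteral(...) then emits the literal 'O''Brien'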

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
index 69c1bbf..01f28ae 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
@@ -79,12 +79,18 @@ import com.google.common.collect.ImmutableList;
 
 public class WhereCompilerTest extends BaseConnectionlessQueryTest {
 
+    private PhoenixPreparedStatement newPreparedStatement(PhoenixConnection pconn, String query) throws SQLException {
+        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        assertRoundtrip(query);
+        return pstmt;
+    }
+    
     @Test
     public void testSingleEqualFilter() throws SQLException {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -101,7 +107,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
         pconn.createStatement().execute("CREATE TABLE t (k bigint not null primary key, v varchar) SALT_BUCKETS=20");
         String query = "select * from t where k=" + 1;
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -122,7 +128,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
         pconn.createStatement().execute("CREATE TABLE t (k varchar primary key, v varchar) SALT_BUCKETS=20");
         String query = "select * from t where k='a'";
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -143,7 +149,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
         pconn.createStatement().execute("CREATE TABLE t (k bigint not null primary key, v varchar) SALT_BUCKETS=20");
         String query = "select * from t where k in (1,3)";
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -195,7 +201,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_string=b_string";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -212,7 +218,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,null) = 'foo'";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -229,7 +235,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         List<Object> binds = Arrays.<Object>asList(tenantId);
 
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         bindParams(pstmt, binds);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
@@ -253,7 +259,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and 0 >= a_integer";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
 
@@ -272,7 +278,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String dateStr = "2012-01-01 12:00:00";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_date >= to_date('" + dateStr + "')";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -291,7 +297,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and x_decimal >= " + toNumberClause;
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -348,7 +354,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = "select * from atable where substr(entity_id,1,3)=?";
         List<Object> binds = Arrays.<Object>asList(keyPrefix);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         bindParams(pstmt, binds);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
@@ -374,7 +380,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = "select * from atable where entity_id=?";
         List<Object> binds = Arrays.<Object>asList(keyPrefix);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         bindParams(pstmt, binds);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
@@ -390,7 +396,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = "select * from atable where organization_id=? AND entity_id=?";
         List<Object> binds = Arrays.<Object>asList(tenantId,keyPrefix);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         bindParams(pstmt, binds);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
@@ -405,7 +411,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = "select * from atable where substr(entity_id,1,3)=?";
         List<Object> binds = Arrays.<Object>asList(keyPrefix);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         bindParams(pstmt, binds);
         QueryPlan plan = pstmt.optimizeQuery();
         // Degenerate b/c "foobar" is more than 3 characters
@@ -420,7 +426,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = "select * from atable where a_string=?";
         List<Object> binds = Arrays.<Object>asList(aString);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         bindParams(pstmt, binds);
         QueryPlan plan = pstmt.optimizeQuery();
         // Degenerate b/c a_string length is 100
@@ -435,7 +441,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = "select * from atable where organization_id=? and (substr(entity_id,1,3)=? or a_integer=?)";
         List<Object> binds = Arrays.<Object>asList(tenantId, keyPrefix, aInt);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         bindParams(pstmt, binds);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
@@ -464,7 +470,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_integer > 'foo'";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
 
         try {
             pstmt.optimizeQuery();
@@ -479,7 +485,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0 and 2=3";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         assertDegenerate(plan.getContext());
     }
@@ -489,7 +495,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and 2=3";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         assertDegenerate(plan.getContext());
     }
@@ -499,7 +505,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and 2<=2";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         assertNull(scan.getFilter());
@@ -514,7 +520,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0 and 2<3";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -536,7 +542,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and (a_integer=0 or 3!=3)";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -557,7 +563,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and (a_integer=0 or 3>2)";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -573,7 +579,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_string IN ('a','b')";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId);
@@ -598,7 +604,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id IN ('%s','%s','%s')",
                 ATABLE_NAME, tenantId1, tenantId3, tenantId2);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1);
@@ -625,7 +631,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id='%s' OR organization_id='%s' OR organization_id='%s'",
                 ATABLE_NAME, tenantId1, tenantId3, tenantId2);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
 
@@ -653,7 +659,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id='%s' AND entity_id IN ('%s','%s')",
                 ATABLE_NAME, tenantId, entityId1, entityId2);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId + entityId1);
@@ -683,7 +689,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id>='%s' AND entity_id<='%s'",
                 ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId1, entityId2);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -712,7 +718,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id='%s'",
                 ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -735,7 +741,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id='%s'",
                 ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId1), PVarchar.INSTANCE.toBytes(entityId));
@@ -765,7 +771,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id IN ('%s','%s') AND entity_id IN ('%s', '%s')",
                 ATABLE_NAME, tenantId1, tenantId2, entityId1, entityId2);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
 
@@ -789,7 +795,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id > '%s' AND organization_id < '%s'",
                 ATABLE_NAME, tenantId1, tenantId2);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
 
@@ -810,7 +816,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id IN ('%s', '%s')",
                 ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId1, entityId2);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId1), PVarchar.INSTANCE.toBytes(entityId1));
@@ -825,7 +831,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_integer between 0 and 10";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -847,7 +853,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         String tenantId = "000000000000001";
         String query = "select * from atable where organization_id='" + tenantId + "' and a_integer not between 0 and 10";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -875,7 +881,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
 
         String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'";
         PhoenixConnection pconn = DriverManager.getConnection(url, PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -907,7 +913,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
 
         String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(tenantId), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
@@ -934,7 +940,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
     public void testScanCaching_Default() throws SQLException {
         String query = "select * from atable where a_integer=0";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
         assertEquals(QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE, pstmt.getFetchSize());
@@ -945,7 +951,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
     public void testScanCaching_CustomFetchSizeOnStatement() throws SQLException {
         String query = "select * from atable where a_integer=0";
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
-        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
         final int FETCH_SIZE = 25;
         pstmt.setFetchSize(FETCH_SIZE);
         QueryPlan plan = pstmt.optimizeQuery();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index ad8e5f5..ddbacb7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -54,8 +54,8 @@ import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.schema.types.PUnsignedLong;
 import org.apache.phoenix.schema.types.PVarchar;
@@ -86,6 +86,7 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
     private static StatementContext compileStatement(String query, List<Object> binds, Integer limit) throws SQLException {
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
         PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        assertRoundtrip(query);
         TestUtil.bindParams(pstmt, binds);
         QueryPlan plan = pstmt.compileQuery();
         assertEquals(limit, plan.getLimit());
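
Both test changes funnel each query through assertRoundtrip before compiling.
A hedged sketch of what such a roundtrip check amounts to (the real helper
lives in the test base class; the SQLParser API shown is an assumption):

    // Parse -> print via the new toSQL machinery -> reparse, and require a fixed point.
    BindableStatement stmt = new SQLParser(query).parseStatement();
    String sql = stmt.toString();
    BindableStatement reparsed = new SQLParser(sql).parseStatement();
    assertEquals(sql, reparsed.toString());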


[32/50] [abbrv] phoenix git commit: PHOENIX-1634 LocalIndexSplitter prevents region from auto split (Rajeshbabu)

Posted by ma...@apache.org.
PHOENIX-1634 LocalIndexSplitter prevents region from auto split (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e09a8f8d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e09a8f8d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e09a8f8d

Branch: refs/heads/calcite
Commit: e09a8f8de715ec17fbdedb28b9b4153b5886d85e
Parents: d4f7b71
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Mon Feb 16 23:22:57 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Mon Feb 16 23:22:57 2015 +0530

----------------------------------------------------------------------
 .../phoenix/coprocessor/MetaDataProtocol.java   |  5 ++-
 .../hbase/index/master/IndexMasterObserver.java | 47 ++++++++++++++++++++
 .../query/ConnectionQueryServicesImpl.java      | 33 +++++++++++---
 3 files changed, 78 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e09a8f8d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index be5fb4d..cf0aabb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -60,8 +60,8 @@ public abstract class MetaDataProtocol extends MetaDataService {
 
     public static final long MIN_TABLE_TIMESTAMP = 0;
 
-    // Incremented from 5 to 6 with the addition of the STORE_NULLS table option in 4.3
-    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_TABLE_TIMESTAMP + 6;
+    // Incremented from 5 to 7 with the addition of the STORE_NULLS table option in 4.3
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_TABLE_TIMESTAMP + 7;
     public static final int DEFAULT_MAX_META_DATA_VERSIONS = 1000;
     public static final int DEFAULT_MAX_STAT_DATA_VERSIONS = 3;
     public static final boolean DEFAULT_META_DATA_KEEP_DELETED_CELLS = true;
@@ -70,6 +70,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 = MIN_TABLE_TIMESTAMP + 3;
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0 = MIN_TABLE_TIMESTAMP + 4;
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1 = MIN_TABLE_TIMESTAMP + 5;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0 = MIN_TABLE_TIMESTAMP + 7;
     
     // TODO: pare this down to minimum, as we don't need duplicates for both table and column errors, nor should we need
     // a different code for every type of error.
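
These MIN_SYSTEM_TABLE_TIMESTAMP_* constants let a newer client detect how old
the server-side SYSTEM.CATALOG schema is and apply only the missing upgrade
steps; the ConnectionQueryServicesImpl hunk below uses exactly this gate:

    // Sketch of the version gate used during upgrade (see the hunk below).
    boolean needs430Upgrade =
            currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0;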

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e09a8f8d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java
index dfb2c62..1da5aff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java
@@ -18,15 +18,22 @@
 package org.apache.phoenix.hbase.index.master;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.balancer.IndexLoadBalancer;
+import org.apache.phoenix.util.MetaDataUtil;
 
 /**
  * Defines of coprocessor hooks(to support secondary indexing) of operations on
@@ -60,6 +67,46 @@ public class IndexMasterObserver extends BaseMasterObserver {
     }
 
     @Override
+    public void preModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
+            TableName tableName, HTableDescriptor htd) throws IOException {
+        HTableDescriptor oldDesc =
+                ctx.getEnvironment().getMasterServices().getTableDescriptors().get(tableName);
+        if (oldDesc.getValue(IndexLoadBalancer.PARENT_TABLE_KEY) == null
+                && htd.getValue(IndexLoadBalancer.PARENT_TABLE_KEY) != null) {
+            TableName userTableName =
+                    TableName.valueOf(htd.getValue(IndexLoadBalancer.PARENT_TABLE_KEY));
+            balancer.addTablesToColocate(userTableName, htd.getTableName());
+        }
+        super.preModifyTableHandler(ctx, tableName, htd);
+    }
+
+    @Override
+    public void postMove(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo region,
+            ServerName srcServer, ServerName destServer) throws IOException {
+        if (balancer != null && balancer.isTableColocated(region.getTable())) {
+            AssignmentManager am = ctx.getEnvironment().getMasterServices().getAssignmentManager();
+            RegionStates regionStates = am.getRegionStates();
+            String tableName = region.getTable().getNameAsString();
+            String correspondingTable =
+                    region.getTable().getNameAsString()
+                            .startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX) ? MetaDataUtil
+                            .getUserTableName(tableName) : MetaDataUtil
+                            .getLocalIndexTableName(tableName);
+            List<HRegionInfo> regions =
+                    regionStates.getRegionsOfTable(TableName.valueOf(correspondingTable));
+            for (HRegionInfo hri : regions) {
+                if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) == 0
+                        && destServer != null) {
+                    balancer.regionOnline(hri, destServer);
+                    am.addPlan(hri.getEncodedName(), new RegionPlan(hri, null, destServer));
+                    am.unassign(hri);
+                }
+            }
+        }
+        super.postMove(ctx, region, srcServer, destServer);
+    }
+
+    @Override
     public void postDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
             TableName tableName) throws IOException {
         if (balancer != null && balancer.isTableColocated(tableName)) {
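
The postMove hook above keeps a local index region pinned to the server that
hosts its data region; the pairing is purely name-based via MetaDataUtil. A
hedged sketch of that mapping (the prefix value is an assumption, not taken
from this diff):

    static final String LOCAL_INDEX_PREFIX = "_LOCAL_IDX_";   // assumed prefix
    static String localIndexTableName(String userTable) {
        return LOCAL_INDEX_PREFIX + userTable;
    }
    static String userTableName(String localIndexTable) {
        return localIndexTable.substring(LOCAL_INDEX_PREFIX.length());
    }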

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e09a8f8d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 2b508b5..b149b92 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1197,10 +1197,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         if (tableType == PTableType.INDEX) { // Index on view
             // Physical index table created up front for multi tenant
             // TODO: if viewIndexId is Short.MIN_VALUE, then we don't need to attempt to create it
-            if (physicalTableName != null && !MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) {
+            if (physicalTableName != null) {
                 if (localIndexTable) {
                     ensureLocalIndexTableCreated(tableName, tableProps, families, splits, MetaDataUtil.getClientTimeStamp(m));
-                } else {
+                } else if (!MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) {
                     ensureViewIndexTableCreated(tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes), physicalTableName, MetaDataUtil.getClientTimeStamp(m));
                 }
             }
@@ -1224,7 +1224,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 familiesPlusDefault.add(new Pair<byte[],Map<String,Object>>(defaultCF,Collections.<String,Object>emptyMap()));
             }
             ensureViewIndexTableCreated(tableName, tableProps, familiesPlusDefault, MetaDataUtil.isSalted(m, kvBuilder, ptr) ? splits : null, MetaDataUtil.getClientTimeStamp(m));
-            ensureLocalIndexTableCreated(MetaDataUtil.getLocalIndexPhysicalName(tableName), tableProps, familiesPlusDefault, splits);
         }
 
         byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
@@ -1848,8 +1847,32 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 // any new columns we've added.
                                 long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
 
-                                // We know that we always need to add the STORE_NULLS column for 4.3 release
-                                String columnsToAdd = PhoenixDatabaseMetaData.STORE_NULLS + " " + PBoolean.INSTANCE.getSqlTypeName();
+                                String columnsToAdd = "";
+                                if(currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) {
+                                    // We know that we always need to add the STORE_NULLS column for 4.3 release
+                                    columnsToAdd = PhoenixDatabaseMetaData.STORE_NULLS + " " + PBoolean.INSTANCE.getSqlTypeName();
+                                    HBaseAdmin admin = null;
+                                    try {
+                                        admin = getAdmin();
+                                        HTableDescriptor[] localIndexTables = admin.listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
+                                        for (HTableDescriptor table : localIndexTables) {
+                                            if (table.getValue(MetaDataUtil.PARENT_TABLE_KEY) == null
+                                                    && table.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_NAME) != null) {
+                                                table.setValue(MetaDataUtil.PARENT_TABLE_KEY,
+                                                    MetaDataUtil.getUserTableName(table
+                                                        .getNameAsString()));
+                                                // Explicitly disable, modify and enable the table to ensure co-location of data
+                                                // and index regions. If we just modify the table descriptor while online schema
+                                                // change is enabled, the region may be reopened on the same region server instead of following the data region.
+                                                admin.disableTable(table.getTableName());
+                                                admin.modifyTable(table.getTableName(), table);
+                                                admin.enableTable(table.getTableName());
+                                            }
+                                        }
+                                    } finally {
+                                        if (admin != null) admin.close();
+                                    }
+                                }
 
                                 // If the server side schema is at before MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 then 
                                 // we need to add INDEX_TYPE and INDEX_DISABLE_TIMESTAMP columns too.


[38/50] [abbrv] phoenix git commit: PHOENIX-1078 Unable to run pig script with Phoenix in a secure HBase cluster

Posted by ma...@apache.org.
PHOENIX-1078 Unable to run pig script with Phoenix in a secure HBase cluster


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dab9d51b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dab9d51b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dab9d51b

Branch: refs/heads/calcite
Commit: dab9d51bf5805e4f123e6095444d690f79250253
Parents: 05723b1
Author: Jeffrey Zhong <je...@apache.org>
Authored: Thu Feb 12 19:22:17 2015 -0800
Committer: Jeffrey Zhong <je...@apache.org>
Committed: Wed Feb 25 14:44:01 2015 -0800

----------------------------------------------------------------------
 .../phoenix/mapreduce/util/ConnectionUtil.java     |  9 +++++++++
 .../mapreduce/util/PhoenixConfigurationUtil.java   | 17 +++++++++++++++++
 .../org/apache/phoenix/pig/PhoenixHBaseLoader.java |  9 ++++++---
 .../apache/phoenix/pig/PhoenixHBaseStorage.java    |  2 ++
 4 files changed, 34 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dab9d51b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
index 0864cba..364baf7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
@@ -20,6 +20,8 @@ package org.apache.phoenix.mapreduce.util;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
+import java.util.Iterator;
+import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +44,13 @@ public class ConnectionUtil {
     public static Connection getConnection(final Configuration configuration) throws SQLException {
         Preconditions.checkNotNull(configuration);
         final Properties props = new Properties();
+        Iterator<Map.Entry<String, String>> iterator = configuration.iterator();
+        if(iterator != null) {
+            while (iterator.hasNext()) {
+                Map.Entry<String, String> entry = iterator.next();
+                props.setProperty(entry.getKey(), entry.getValue());
+            }
+        }
         final Connection conn = DriverManager.getConnection(QueryUtil.getUrl(configuration.get(HConstants.ZOOKEEPER_QUORUM)), props);
         return conn;
     }
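
Copying every Configuration entry into the JDBC Properties is what lets
security settings from hbase-site.xml (for example Kerberos principals) reach
the Phoenix connection. A hedged usage sketch (quorum value hypothetical):

    Configuration conf = HBaseConfiguration.create();          // loads hbase-site.xml
    conf.set(HConstants.ZOOKEEPER_QUORUM, "zk1.example.com");  // hypothetical quorum
    try (Connection conn = ConnectionUtil.getConnection(conf)) {
        // every conf entry is now visible to the Phoenix driver as a Property
    }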

http://git-wip-us.apache.org/repos/asf/phoenix/blob/dab9d51b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index 83a606b..4d025ee 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -19,13 +19,18 @@ package org.apache.phoenix.mapreduce.util;
 
 import static org.apache.commons.lang.StringUtils.isNotEmpty;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable;
 import org.apache.hadoop.mapreduce.lib.db.DBWritable;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -296,4 +301,16 @@ public final class PhoenixConfigurationUtil {
         Preconditions.checkNotNull(configuration);
         return configuration.get(OUTPUT_TABLE_NAME);
     }
+
+    public static void loadHBaseConfiguration(Job job) throws IOException {
+        // load hbase-site.xml
+        Configuration hbaseConf = HBaseConfiguration.create();
+        for (Map.Entry<String, String> entry : hbaseConf) {
+            if (job.getConfiguration().get(entry.getKey()) == null) {
+                job.getConfiguration().set(entry.getKey(), entry.getValue());
+            }
+        }
+        //In order to have phoenix working on a secured cluster
+        TableMapReduceUtil.initCredentials(job);
+    }
 }
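
A minimal driver-side sketch of the new helper (the job name and the override
key are illustrative assumptions). Note the merge rule: hbase-site.xml entries
are copied only where the job configuration has no value yet, so job-level
overrides win, and initCredentials(job) then obtains the delegation tokens
needed on a secured cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;

    public class SecureJobDriverSketch {
        public static void main(String[] args) throws Exception {
            Job job = Job.getInstance(new Configuration(), "phoenix-secure-job");
            // Set before the merge, so the hbase-site.xml value cannot clobber it.
            job.getConfiguration().set("hbase.rpc.timeout", "120000");
            PhoenixConfigurationUtil.loadHBaseConfiguration(job);
            // ... configure input/output formats and submit the job ...
        }
    }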

http://git-wip-us.apache.org/repos/asf/phoenix/blob/dab9d51b/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java
index 1218e82..18e362a 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java
@@ -106,12 +106,13 @@ public final class PhoenixHBaseLoader extends LoadFunc implements LoadMetadata {
     }
     
     @Override
-    public void setLocation(String location, Job job) throws IOException {        
+    public void setLocation(String location, Job job) throws IOException {
+        PhoenixConfigurationUtil.loadHBaseConfiguration(job);
+
         final Configuration configuration = job.getConfiguration();
         //explicitly turning off combining splits. 
         configuration.setBoolean("pig.noSplitCombination", true);
-        //to have phoenix working on a secured cluster
-        TableMapReduceUtil.initCredentials(job);
+
         this.initializePhoenixPigConfiguration(location, configuration);
     }
 
@@ -222,6 +223,8 @@ public final class PhoenixHBaseLoader extends LoadFunc implements LoadMetadata {
         if(schema != null) {
             return schema;
         }
+
+        PhoenixConfigurationUtil.loadHBaseConfiguration(job);
         final Configuration configuration = job.getConfiguration();
         this.initializePhoenixPigConfiguration(location, configuration);
         this.schema = PhoenixPigSchemaUtil.getResourceSchema(this.config);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/dab9d51b/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseStorage.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseStorage.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseStorage.java
index eb2c124..72d958b 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseStorage.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseStorage.java
@@ -140,6 +140,8 @@ public class PhoenixHBaseStorage implements StoreFuncInterface {
             if (!"hbase".equals(locationURI.getScheme())) {
                 throw new IOException(String.format("Location must use the hbase protocol, hbase://tableName[/columnList]. Supplied location=%s",location));
             }
+
+            PhoenixConfigurationUtil.loadHBaseConfiguration(job);
             config = job.getConfiguration();
             config.set(HConstants.ZOOKEEPER_QUORUM, server);
             String tableName = locationURI.getAuthority();
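
For context, a hedged sketch of driving the loader and storage from Java via
Pig's embedding API; the quorum, table name, and batch size are illustrative
assumptions, and the hbase:// location format follows the check in
setStoreLocation above:

    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;

    public class PhoenixPigSketch {
        public static void main(String[] args) throws Exception {
            PigServer pig = new PigServer(ExecType.MAPREDUCE);
            // Both statements now trigger loadHBaseConfiguration(), so
            // hbase-site.xml and security credentials are picked up before
            // the job configuration is read.
            pig.registerQuery("A = LOAD 'hbase://table/MY_TABLE' USING "
                    + "org.apache.phoenix.pig.PhoenixHBaseLoader('zk1,zk2,zk3');");
            pig.registerQuery("STORE A INTO 'hbase://MY_TABLE' USING "
                    + "org.apache.phoenix.pig.PhoenixHBaseStorage('zk1,zk2,zk3', '-batchSize 1000');");
        }
    }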


[44/50] [abbrv] phoenix git commit: PHOENIX-1489 Access column values positionally from client

Posted by ma...@apache.org.
PHOENIX-1489 Access column values positionally from client


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3f829751
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3f829751
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3f829751

Branch: refs/heads/calcite
Commit: 3f829751dd5158c526e5c5bd3da61dde1c6e4194
Parents: 0440aca
Author: maryannxue <we...@intel.com>
Authored: Tue Mar 3 12:12:49 2015 -0500
Committer: maryannxue <we...@intel.com>
Committed: Tue Mar 3 12:12:49 2015 -0500

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/HashJoinIT.java  |  55 ++--
 .../phoenix/end2end/HashJoinLocalIndexIT.java   |   6 +-
 .../org/apache/phoenix/end2end/SubqueryIT.java  |  30 +--
 .../end2end/SubqueryUsingSortMergeJoinIT.java   |  32 +--
 .../index/GlobalIndexOptimizationIT.java        |  22 +-
 .../phoenix/end2end/index/LocalIndexIT.java     |   4 +-
 .../phoenix/compile/ExpressionCompiler.java     |   2 +-
 .../apache/phoenix/compile/FromCompiler.java    |  87 ++++++-
 .../apache/phoenix/compile/JoinCompiler.java    | 258 ++++---------------
 .../apache/phoenix/compile/OrderByCompiler.java |   2 +-
 .../phoenix/compile/ProjectionCompiler.java     |  58 +++--
 .../apache/phoenix/compile/QueryCompiler.java   | 115 +++++----
 .../compile/TupleProjectionCompiler.java        | 214 +++++++++++++++
 .../apache/phoenix/compile/UpsertCompiler.java  |   4 +-
 .../apache/phoenix/compile/WhereCompiler.java   |   2 +-
 .../coprocessor/BaseScannerRegionObserver.java  |  21 +-
 .../GroupedAggregateRegionObserver.java         |  11 +-
 .../coprocessor/HashJoinRegionScanner.java      |  72 +++---
 .../phoenix/coprocessor/ScanRegionObserver.java |   9 +-
 .../UngroupedAggregateRegionObserver.java       |  10 +-
 .../apache/phoenix/execute/TupleProjector.java  |  18 +-
 .../org/apache/phoenix/join/HashJoinInfo.java   |  25 +-
 .../apache/phoenix/optimize/QueryOptimizer.java |  19 +-
 .../org/apache/phoenix/schema/ColumnRef.java    |   2 +-
 .../phoenix/schema/LocalIndexDataColumnRef.java |   9 +-
 .../apache/phoenix/schema/MetaDataClient.java   |   2 +-
 .../org/apache/phoenix/schema/PTableType.java   |   2 +-
 .../apache/phoenix/schema/ProjectedColumn.java  |  59 +++++
 .../org/apache/phoenix/schema/TableRef.java     |   8 +-
 .../phoenix/compile/WhereCompilerTest.java      |   6 +-
 30 files changed, 691 insertions(+), 473 deletions(-)
----------------------------------------------------------------------
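
The visible effect of this commit on EXPLAIN output is that columns projected
through a join are now qualified with their table alias (for example
"I.item_id" rather than a bare "item_id"), as the updated test expectations
below show. A hedged sketch of how the ITs read a plan, with an assumed URL
and query:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import org.apache.phoenix.util.QueryUtil;

    public class ExplainPlanSketch {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try {
                ResultSet rs = conn.createStatement().executeQuery(
                        "EXPLAIN SELECT i.name FROM items i JOIN orders o ON i.item_id = o.item_id");
                // Flattens the multi-row EXPLAIN result into one newline-joined string.
                System.out.println(QueryUtil.getExplainPlan(rs));
            } finally {
                conn.close();
            }
        }
    }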


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index e915b36..596e5e9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -219,7 +219,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "            SERVER FILTER BY QUANTITY < 5000\n" +
                 "    PARALLEL INNER-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I.item_id\" IN (\"O.item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i2.item_id, i1.name FROM joinItemTable i1 
@@ -230,7 +230,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.item_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I1.item_id\" IN (\"I2.item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -242,7 +242,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.supplier_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I1.item_id\" IN (\"I2.supplier_id\")",
                 /*
                  * testStarJoin()
                  *     SELECT order_id, c.name, i.name iname, quantity, o.date 
@@ -271,7 +271,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I.item_id\" IN (\"O.item_id\")",
                 /*
                  * testSubJoin()
                  *     SELECT * FROM joinCustomerTable c 
@@ -296,7 +296,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                    SERVER FILTER BY NAME != 'T3'\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN (\"O.customer_id\")",
+                "    DYNAMIC SERVER FILTER BY \"C.customer_id\" IN (\"O.customer_id\")",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
                  *     SELECT i.name, sum(quantity) FROM joinOrderTable o 
@@ -416,7 +416,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.supplier_id\")\n" +
+                "    DYNAMIC SERVER FILTER BY \"S.supplier_id\" IN (\"I.supplier_id\")\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()
@@ -440,7 +440,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY COL0 IN (RHS.COL2)",
+                "    DYNAMIC SERVER FILTER BY LHS.COL0 IN (RHS.COL2)",
                 /*
                  * testJoinWithKeyRangeOptimization()
                  *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
@@ -452,7 +452,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY (COL0, COL1) IN ((RHS.COL1, RHS.COL2))",
+                "    DYNAMIC SERVER FILTER BY (LHS.COL0, LHS.COL1) IN ((RHS.COL1, RHS.COL2))",
                 /*
                  * testJoinWithKeyRangeOptimization()
                  *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
@@ -464,7 +464,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY (COL0, COL1, COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
+                "    DYNAMIC SERVER FILTER BY (LHS.COL0, LHS.COL1, LHS.COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
                 /*
                  * testJoinWithSetMaxRows()
                  *     statement.setMaxRows(4);
@@ -478,7 +478,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT 4 ROW LIMIT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")\n" +
+                "    DYNAMIC SERVER FILTER BY \"I.item_id\" IN (\"O.item_id\")\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 }});
         testCases.add(new String[][] {
@@ -608,7 +608,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.:item_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I1.item_id\" IN (\"I2.:item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -677,7 +677,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                    SERVER FILTER BY \"NAME\" != 'T3'\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN (\"O.customer_id\")",
+                "    DYNAMIC SERVER FILTER BY \"C.customer_id\" IN (\"O.customer_id\")",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
                  *     SELECT i.name, sum(quantity) FROM joinOrderTable o 
@@ -798,7 +798,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_SCHEMA + ".idx_item\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.0:supplier_id\")\n" +
+                "    DYNAMIC SERVER FILTER BY \"S.supplier_id\" IN (\"I.0:supplier_id\")\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()
@@ -822,7 +822,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY COL0 IN (RHS.COL2)",
+                "    DYNAMIC SERVER FILTER BY LHS.COL0 IN (RHS.COL2)",
                 /*
                  * testJoinWithKeyRangeOptimization()
                  *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
@@ -834,7 +834,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY (COL0, COL1) IN ((RHS.COL1, RHS.COL2))",
+                "    DYNAMIC SERVER FILTER BY (LHS.COL0, LHS.COL1) IN ((RHS.COL1, RHS.COL2))",
                 /*
                  * testJoinWithKeyRangeOptimization()
                  *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
@@ -846,7 +846,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY (COL0, COL1, COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
+                "    DYNAMIC SERVER FILTER BY (LHS.COL0, LHS.COL1, LHS.COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
                 /*
                  * testJoinWithSetMaxRows()
                  *     statement.setMaxRows(4);
@@ -918,9 +918,8 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_ITEM_TABLE_DISPLAY_NAME+" [-32768]\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.0:NAME\"]\n" +
+                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [\"I.0:NAME\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /*
@@ -989,7 +988,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I.:item_id\" IN (\"O.item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i2.item_id, i1.name FROM joinItemTable i1 
@@ -1001,7 +1000,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER "+ MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n"  +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.:item_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I1.item_id\" IN (\"I2.:item_id\")",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -1015,7 +1014,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"I2.0:supplier_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I1.:item_id\" IN (\"I2.0:supplier_id\")",
                 /*
                  * testStarJoin()
                  *     SELECT order_id, c.name, i.name iname, quantity, o.date 
@@ -1051,7 +1050,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_CUSTOMER_TABLE_DISPLAY_NAME+" [-32768]\n"+
                 "                    SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "                CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")",
+                "    DYNAMIC SERVER FILTER BY \"I.:item_id\" IN (\"O.item_id\")",
                 /*
                  * testSubJoin()
                  *     SELECT * FROM joinCustomerTable c 
@@ -1077,7 +1076,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT MERGE SORT\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN (\"O.customer_id\")",
+                "    DYNAMIC SERVER FILTER BY \"C.customer_id\" IN (\"O.customer_id\")",
                 /* 
                  * testJoinWithSubqueryAndAggregation()
                  *     SELECT i.name, sum(quantity) FROM joinOrderTable o 
@@ -1203,7 +1202,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.0:supplier_id\")\n" +
+                "    DYNAMIC SERVER FILTER BY \"S.supplier_id\" IN (\"I.0:supplier_id\")\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()
@@ -1227,7 +1226,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY COL0 IN (RHS.COL2)",
+                "    DYNAMIC SERVER FILTER BY LHS.COL0 IN (RHS.COL2)",
                 /*
                  * testJoinWithKeyRangeOptimization()
                  *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
@@ -1239,7 +1238,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY (COL0, COL1) IN ((RHS.COL1, RHS.COL2))",
+                "    DYNAMIC SERVER FILTER BY (LHS.COL0, LHS.COL1) IN ((RHS.COL1, RHS.COL2))",
                 /*
                  * testJoinWithKeyRangeOptimization()
                  *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
@@ -1251,7 +1250,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY (COL0, COL1, COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
+                "    DYNAMIC SERVER FILTER BY (LHS.COL0, LHS.COL1, LHS.COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
                 /*
                  * testJoinWithSetMaxRows()
                  *     statement.setMaxRows(4);
@@ -1267,7 +1266,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT 4 ROW LIMIT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")\n" +
+                "    DYNAMIC SERVER FILTER BY \"I.:item_id\" IN (\"O.item_id\")\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 }});
         return testCases;
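
Taken from the expectations above, one plan line shows the shape of the change;
the probe-side row key column now carries its table alias:

    before:  DYNAMIC SERVER FILTER BY "item_id" IN ("O.item_id")
    after:   DYNAMIC SERVER FILTER BY "I.item_id" IN ("O.item_id")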

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
index 2d0cc72..645d21b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
@@ -105,17 +105,17 @@ public class HashJoinLocalIndexIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768,*] - [-32768,'T6']\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.0:supplier_id\")",
+                "    DYNAMIC SERVER FILTER BY \"S.:supplier_id\" IN (\"I.supplier_id\")",
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768,'S1']\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"S.0:PHONE\"]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"S.PHONE\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768,*] - [-32768,'T6']\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY \"supplier_id\" IN (\"I.0:supplier_id\")",
+                "    DYNAMIC SERVER FILTER BY \"S.:supplier_id\" IN (\"I.supplier_id\")",
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768,*] - [-32768,'S3']\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
index 7bc97e7..f655e0a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
@@ -113,7 +113,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " \\['000000000000001'\\] - \\[\\*\\]\n" +
                 "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)",
+                "    DYNAMIC SERVER FILTER BY \"I.item_id\" IN \\(\\$\\d+.\\$\\d+\\)",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER SORTED BY [I.NAME]\n" +
@@ -129,7 +129,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\", NAME\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -137,17 +137,17 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\", NAME\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            SKIP-SCAN-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
+                "            DYNAMIC SERVER FILTER BY \"" + JOIN_ITEM_TABLE_DISPLAY_NAME + ".item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "    AFTER-JOIN SERVER FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER SORTED BY [NAME]\n" +
+                "    SERVER SORTED BY [I.NAME]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL ANTI-JOIN TABLE 0 (SKIP MERGE)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -165,9 +165,9 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
+                "            DYNAMIC SERVER FILTER BY \"I.item_id\" IN \\(\"O.item_id\"\\)\n" +
                 "            AFTER-JOIN SERVER FILTER BY \\(I.NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)\n" +
-                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN \\(\\$\\d+.\\$\\d+\\)"
+                "    DYNAMIC SERVER FILTER BY \"" + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + ".customer_id\" IN \\(\\$\\d+.\\$\\d+\\)"
                 }});
         testCases.add(new String[][] {
                 {
@@ -200,7 +200,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -209,7 +209,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -255,7 +255,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " \\['000000000000001'\\] - \\[\\*\\]\n" +
                 "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)",
+                "    DYNAMIC SERVER FILTER BY \"I.:item_id\" IN \\(\\$\\d+.\\$\\d+\\)",
                             
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 
@@ -274,7 +274,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -283,13 +283,13 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
+                "            DYNAMIC SERVER FILTER BY \"" + JOIN_SCHEMA + ".idx_item.:item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "    AFTER-JOIN SERVER FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
@@ -314,9 +314,9 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
+                "            DYNAMIC SERVER FILTER BY \"I.:item_id\" IN \\(\"O.item_id\"\\)\n" +
                 "            AFTER-JOIN SERVER FILTER BY \\(\"I.0:NAME\" = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)\n" +
-                "    DYNAMIC SERVER FILTER BY \"customer_id\" IN \\(\\$\\d+.\\$\\d+\\)"
+                "    DYNAMIC SERVER FILTER BY \"" + JOIN_SCHEMA + ".idx_customer.:customer_id\" IN \\(\\$\\d+.\\$\\d+\\)"
                 }});
         return testCases;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
index f931bae..59f75e5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
@@ -130,9 +130,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT\n" +
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\", NAME\\]\n" +
+                "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
+                "        CLIENT SORTED BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"]\\\n" +
@@ -140,14 +140,14 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    CLIENT SORTED BY \\[.*.CO_ITEM_ID, .*.CO_ITEM_NAME\\]\n" +
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\", NAME\\]\n" +
+                "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
+                "    CLIENT SORTED BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "        SKIP-SCAN-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
+                "        DYNAMIC SERVER FILTER BY \"" + JOIN_ITEM_TABLE_DISPLAY_NAME + ".item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "CLIENT FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",            
 
                 "SORT-MERGE-JOIN \\(SEMI\\) TABLES\n" +
@@ -163,7 +163,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
+                "        DYNAMIC SERVER FILTER BY \"I.item_id\" IN \\(\"O.item_id\"\\)\n" +
                 "        AFTER-JOIN SERVER FILTER BY \\(I.NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
         testCases.add(new String[][] {
@@ -197,9 +197,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[\"item_id\", \"NAME\"\\]\n" +
+                "        CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -208,9 +208,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\"item_id\", \"NAME\"\\]\n" +
+                "    CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -267,9 +267,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[\"item_id\", \"NAME\"\\]\n" +
+                "        CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -278,14 +278,14 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\"item_id\", \"NAME\"\\]\n" +
+                "    CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
+                "        DYNAMIC SERVER FILTER BY \"" + JOIN_SCHEMA + ".idx_item.:item_id\" IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "CLIENT FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "SORT-MERGE-JOIN \\(SEMI\\) TABLES\n" +
@@ -305,7 +305,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
+                "        DYNAMIC SERVER FILTER BY \"I.:item_id\" IN \\(\"O.item_id\"\\)\n" +
                 "        AFTER-JOIN SERVER FILTER BY \\(\"I.0:NAME\" = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
         return testCases;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
index 7fb879e..07d87b7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
@@ -113,7 +113,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\['a'\\]\n" +
                     "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "    DYNAMIC SERVER FILTER BY \\(\"T.T_ID\", \"T.K1\", \"T.K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             String actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -138,7 +138,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\['a'\\]\n" +
                     "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "    DYNAMIC SERVER FILTER BY \\(\"T.T_ID\", \"T.K1\", \"T.K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -163,12 +163,12 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
             expected = 
                     "CLIENT PARALLEL \\d-WAY FULL SCAN OVER " + TestUtil.DEFAULT_DATA_TABLE_NAME + "\n" +
                     "    SERVER FILTER BY K3 > 1\n" +
-                    "    SERVER SORTED BY \\[V1, T_ID\\]\n" +
+                    "    SERVER SORTED BY \\[T.V1, T.T_ID\\]\n" +
                     "CLIENT MERGE SORT\n" +
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\[\\*\\] - \\['z'\\]\n" +
                     "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "    DYNAMIC SERVER FILTER BY \\(\"T.T_ID\", \"T.K1\", \"T.K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -198,12 +198,12 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
             
             expected = 
                     "CLIENT PARALLEL \\d-WAY FULL SCAN OVER " + TestUtil.DEFAULT_DATA_TABLE_NAME + "\n" +
-                            "    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[T_ID, V1, K3\\]\n" +
+                            "    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[T.T_ID, T.V1, T.K3\\]\n" +
                             "CLIENT MERGE SORT\n" +
                             "    SKIP-SCAN-JOIN TABLE 0\n" +
                             "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\[\\*\\] - \\['z'\\]\n" +
                             "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                            "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                            "    DYNAMIC SERVER FILTER BY \\(\"T.T_ID\", \"T.K1\", \"T.K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -231,13 +231,13 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
             rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
             expected = 
                     "CLIENT PARALLEL \\d-WAY FULL SCAN OVER T\n" +
-                            "    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[V1\\]\n" +
+                            "    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[T.V1\\]\n" +
                             "CLIENT MERGE SORT\n" +
-                            "CLIENT SORTED BY \\[V1\\]\n" +
+                            "CLIENT SORTED BY \\[T.V1\\]\n" +
                             "    SKIP-SCAN-JOIN TABLE 0\n" +
                             "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER I \\[\\*\\] - \\['z'\\]\n" +
                             "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                            "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                            "    DYNAMIC SERVER FILTER BY \\(\"T.T_ID\", \"T.K1\", \"T.K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             actual = QueryUtil.getExplainPlan(rs);
             assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
             
@@ -275,7 +275,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                             "    SKIP-SCAN-JOIN TABLE 0\n" +
                             "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER I \\['tid1','a'\\]\n" +
                             "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                            "    DYNAMIC SERVER FILTER BY \\(\"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                            "    DYNAMIC SERVER FILTER BY \\(\"T.K1\", \"T.K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             assertTrue("Expected:\n" + expected + "\ndid not match\n" + actual, Pattern.matches(expected, actual));
             
             rs = conn1.createStatement().executeQuery(query);
@@ -323,7 +323,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER _IDX_T \\[-32768,1\\] - \\[-32768,2\\]\n" +
                     "            SERVER FILTER BY FIRST KEY ONLY AND \"K2\" IN \\(3,4\\)\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "    DYNAMIC SERVER FILTER BY \\(\"V.T_ID\", \"V.K1\", \"V.K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             assertTrue("Expected:\n" + expected + "\ndid not match\n" + actual, Pattern.matches(expected,actual));
             
             rs = conn1.createStatement().executeQuery(query);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 4080730..a7b7655 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -323,7 +323,7 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
             assertEquals(
                   "CLIENT PARALLEL " + numRegions + "-WAY RANGE SCAN OVER " + MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME) + " [-32768,*] - [-32768,'z']\n"
                 + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                + "    SERVER SORTED BY [K3]\n" +
+                + "    SERVER SORTED BY [\"K3\"]\n" +
                 "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
  
             rs = conn1.createStatement().executeQuery(query);
@@ -471,7 +471,7 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL " + numRegions + "-WAY RANGE SCAN OVER "
                         + MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME)+" [-32768,*] - [-32768,'z']\n"
                         + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                        + "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"V1\", \"T_ID\", K3]\n" + "CLIENT MERGE SORT",
+                        + "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"V1\", \"T_ID\", \"K3\"]\n" + "CLIENT MERGE SORT",
                 QueryUtil.getExplainPlan(rs));
             
             rs = conn1.createStatement().executeQuery(query);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 81e4059..52c67f1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -572,7 +572,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
              * indexed columns. Without this check present we wrongly and unnecessarily
              * end up creating a RoundExpression. 
              */
-            if (context.getResolver().getTables().get(0).getTable().getType() != PTableType.INDEX) {
+            if (context.getCurrentTable().getTable().getType() != PTableType.INDEX) {
                 expr =  convertToRoundExpressionIfNeeded(fromDataType, targetDataType, children);
             }
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 1a605b7..a57250e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -21,8 +21,10 @@ import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HTableInterface;
@@ -40,6 +42,7 @@ import org.apache.phoenix.parse.FamilyWildcardParseNode;
 import org.apache.phoenix.parse.JoinTableNode;
 import org.apache.phoenix.parse.NamedTableNode;
 import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.parse.SingleTableStatement;
 import org.apache.phoenix.parse.TableName;
@@ -65,15 +68,19 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ListMultimap;
@@ -178,7 +185,6 @@ public class FromCompiler {
     public static ColumnResolver getResolverForCompiledDerivedTable(PhoenixConnection connection, TableRef tableRef, RowProjector projector)
             throws SQLException {
         List<PColumn> projectedColumns = new ArrayList<PColumn>();
-        List<Expression> sourceExpressions = new ArrayList<Expression>();
         PTable table = tableRef.getTable();
         for (PColumn column : table.getColumns()) {
             Expression sourceExpression = projector.getColumnProjector(column.getPosition()).getExpression();
@@ -186,7 +192,6 @@ public class FromCompiler {
                     sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
                     column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr());
             projectedColumns.add(projectedColumn);
-            sourceExpressions.add(sourceExpression);
         }
         PTable t = PTableImpl.makePTable(table, projectedColumns);
         return new SingleTableColumnResolver(connection, new TableRef(tableRef.getTableAlias(), t, tableRef.getLowerBoundTimeStamp(), tableRef.hasDynamicCols()));
@@ -207,6 +212,10 @@ public class FromCompiler {
         SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), false);
         return visitor;
     }
+    
+    public static ColumnResolver getResolverForProjectedTable(PTable projectedTable) {
+        return new ProjectedTableColumnResolver(projectedTable);
+    }
 
     private static class SingleTableColumnResolver extends BaseColumnResolver {
     	private final List<TableRef> tableRefs;
@@ -401,8 +410,8 @@ public class FromCompiler {
     }
 
     private static class MultiTableColumnResolver extends BaseColumnResolver implements TableNodeVisitor<Void> {
-        private final ListMultimap<String, TableRef> tableMap;
-        private final List<TableRef> tables;
+        protected final ListMultimap<String, TableRef> tableMap;
+        protected final List<TableRef> tables;
 
         private MultiTableColumnResolver(PhoenixConnection connection, int tsAddition) {
         	super(connection, tsAddition);
@@ -571,4 +580,74 @@ public class FromCompiler {
         }
 
     }
+    
+    private static class ProjectedTableColumnResolver extends MultiTableColumnResolver {
+        private final boolean isLocalIndex;
+        private final List<TableRef> theTableRefs;
+        private final Map<ColumnRef, Integer> columnRefMap;
+        
+        private ProjectedTableColumnResolver(PTable projectedTable) {
+            super(null, 0);
+            Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED);
+            this.isLocalIndex = projectedTable.getIndexType() == IndexType.LOCAL;
+            this.columnRefMap = new HashMap<ColumnRef, Integer>();
+            long ts = Long.MAX_VALUE;
+            for (int i = projectedTable.getBucketNum() == null ? 0 : 1; i < projectedTable.getColumns().size(); i++) {
+                PColumn column = projectedTable.getColumns().get(i);
+                ColumnRef colRef = ((ProjectedColumn) column).getSourceColumnRef();
+                TableRef tableRef = colRef.getTableRef();
+                if (!tables.contains(tableRef)) {
+                    String alias = tableRef.getTableAlias();
+                    if (alias != null) {
+                        this.tableMap.put(alias, tableRef);
+                    }
+                    String name = tableRef.getTable().getName().getString();
+                    if (alias == null || !alias.equals(name)) {
+                        tableMap.put(name, tableRef);
+                    }
+                    tables.add(tableRef);
+                    if (tableRef.getLowerBoundTimeStamp() < ts) {
+                        ts = tableRef.getLowerBoundTimeStamp();
+                    }
+                }
+                this.columnRefMap.put(new ColumnRef(tableRef, colRef.getColumnPosition()), column.getPosition());
+            }
+            this.theTableRefs = ImmutableList.of(new TableRef(ParseNodeFactory.createTempAlias(), projectedTable, ts, false));
+        }
+        
+        @Override
+        public List<TableRef> getTables() {
+            return theTableRefs;
+        }
+        
+        @Override
+        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
+            ColumnRef colRef;
+            try {
+                colRef = super.resolveColumn(schemaName, tableName, colName);
+            } catch (ColumnNotFoundException e) {
+                // This could be a ColumnRef for local index data column.
+                TableRef tableRef = isLocalIndex ? super.getTables().get(0) : super.resolveTable(schemaName, tableName);
+                if (tableRef.getTable().getIndexType() == IndexType.LOCAL) {
+                    try {
+                        TableRef parentTableRef = super.resolveTable(
+                                tableRef.getTable().getSchemaName().getString(),
+                                tableRef.getTable().getParentTableName().getString());
+                        colRef = new ColumnRef(parentTableRef,
+                                IndexUtil.getDataColumnFamilyName(colName),
+                                IndexUtil.getDataColumnName(colName));
+                    } catch (TableNotFoundException te) {
+                        throw e;
+                    }
+                } else {
+                    throw e;
+                }
+            }
+            Integer position = columnRefMap.get(colRef);
+            if (position == null)
+                throw new ColumnNotFoundException(colName);
+            
+            return new ColumnRef(theTableRefs.get(0), position);
+        }
+    }
 }

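Editor's note: the new ProjectedTableColumnResolver resolves a column against the original tables first, then rewrites it as a positional reference into the single projected TableRef via columnRefMap (with a local-index fallback to the parent table's data column). A minimal, self-contained sketch of that source-position to projected-position mapping; the types are simplified stand-ins, not Phoenix's ColumnRef/TableRef:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Objects;

    public class ProjectedResolverSketch {
        // Simplified stand-in for ColumnRef: identity is (table, position).
        static final class ColRef {
            final String table;
            final int position;
            ColRef(String table, int position) { this.table = table; this.position = position; }
            @Override public boolean equals(Object o) {
                return o instanceof ColRef && ((ColRef) o).table.equals(table)
                        && ((ColRef) o).position == position;
            }
            @Override public int hashCode() { return Objects.hash(table, position); }
        }

        private final Map<ColRef, Integer> columnRefMap = new HashMap<>();

        // Record that source column (table, srcPos) landed at projectedPos.
        void map(String table, int srcPos, int projectedPos) {
            columnRefMap.put(new ColRef(table, srcPos), projectedPos);
        }

        // Resolve against the source, then rewrite to the projected position,
        // mirroring the ColumnNotFoundException behavior on a miss.
        int resolve(String table, int srcPos) {
            Integer pos = columnRefMap.get(new ColRef(table, srcPos));
            if (pos == null) throw new IllegalArgumentException("column not projected");
            return pos;
        }

        public static void main(String[] args) {
            ProjectedResolverSketch r = new ProjectedResolverSketch();
            r.map("LHS", 0, 0); // LHS pk column
            r.map("RHS", 2, 1); // RHS value column appended after LHS columns
            System.out.println(r.resolve("RHS", 2)); // 1
        }
    }
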
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index c29ea23..98b7edb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -17,8 +17,6 @@
  */
 package org.apache.phoenix.compile;
 
-import static org.apache.phoenix.schema.SaltingUtil.SALTING_COLUMN;
-
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -35,7 +33,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
-import org.apache.phoenix.execute.TupleProjector;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.CoerceExpression;
 import org.apache.phoenix.expression.Expression;
@@ -67,18 +64,18 @@ import org.apache.phoenix.parse.TableNode;
 import org.apache.phoenix.parse.TableNodeVisitor;
 import org.apache.phoenix.parse.TableWildcardParseNode;
 import org.apache.phoenix.parse.WildcardParseNode;
-import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.LocalIndexDataColumnRef;
 import org.apache.phoenix.schema.MetaDataEntityNotFoundException;
 import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PColumnImpl;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
@@ -95,8 +92,7 @@ import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
@@ -722,24 +718,19 @@ public class JoinCompiler {
             }
         }
 
-        public ProjectedPTableWrapper createProjectedTable(boolean retainPKColumns, StatementContext context) throws SQLException {
+        public PTable createProjectedTable(boolean retainPKColumns, StatementContext context) throws SQLException {
             assert(!isSubselect());
-            List<PColumn> projectedColumns = new ArrayList<PColumn>();
-            List<Expression> sourceExpressions = new ArrayList<Expression>();
-            ListMultimap<String, String> columnNameMap = ArrayListMultimap.<String, String>create();
+            List<ColumnRef> sourceColumns = new ArrayList<ColumnRef>();
             PTable table = tableRef.getTable();
-            boolean hasSaltingColumn = retainPKColumns && table.getBucketNum() != null;
             if (retainPKColumns) {
                 for (PColumn column : table.getPKColumns()) {
-                    addProjectedColumn(projectedColumns, sourceExpressions, columnNameMap,
-                            column, column.getFamilyName(), hasSaltingColumn, false, context);
+                    sourceColumns.add(new ColumnRef(tableRef, column.getPosition()));
                 }
             }
             if (isWildCardSelect()) {
                 for (PColumn column : table.getColumns()) {
                     if (!retainPKColumns || !SchemaUtil.isPKColumn(column)) {
-                        addProjectedColumn(projectedColumns, sourceExpressions, columnNameMap,
-                                column, PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY), hasSaltingColumn, false, context);
+                        sourceColumns.add(new ColumnRef(tableRef, column.getPosition()));
                     }
                 }
             } else {
@@ -748,76 +739,27 @@ public class JoinCompiler {
                     if (e.getValue() != ColumnRefType.PREFILTER
                             && columnRef.getTableRef().equals(tableRef)
                             && (!retainPKColumns || !SchemaUtil.isPKColumn(columnRef.getColumn()))) {
-                        PColumn column = columnRef.getColumn();
-                        addProjectedColumn(projectedColumns, sourceExpressions, columnNameMap,
-                                column, PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY), hasSaltingColumn,
-                                columnRef instanceof LocalIndexColumnRef, context);
+                        if (columnRef instanceof LocalIndexColumnRef) {
+                            sourceColumns.add(new LocalIndexDataColumnRef(context, IndexUtil.getIndexColumnName(columnRef.getColumn())));
+                        } else {
+                            sourceColumns.add(columnRef);
+                        }
                     }
                 }
             }
 
-            PTable t = PTableImpl.makePTable(table.getTenantId(), PNameFactory.newName(PROJECTED_TABLE_SCHEMA), table.getName(), PTableType.JOIN,
-                        table.getIndexState(), table.getTimeStamp(), table.getSequenceNumber(), table.getPKName(),
-                        retainPKColumns ? table.getBucketNum() : null, projectedColumns, table.getParentSchemaName(),
-                        table.getParentTableName(), table.getIndexes(), table.isImmutableRows(), Collections.<PName>emptyList(), null, null,
-                        table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(),
-                        table.getIndexType());
-            return new ProjectedPTableWrapper(t, columnNameMap, sourceExpressions);
-        }
-
-        private void addProjectedColumn(List<PColumn> projectedColumns, List<Expression> sourceExpressions,
-                ListMultimap<String, String> columnNameMap, PColumn sourceColumn, PName familyName, boolean hasSaltingColumn,
-                boolean isLocalIndexColumnRef, StatementContext context)
-        throws SQLException {
-            if (sourceColumn == SALTING_COLUMN)
-                return;
-
-            int position = projectedColumns.size() + (hasSaltingColumn ? 1 : 0);
-            PTable table = tableRef.getTable();
-            String schemaName = table.getSchemaName().getString();
-            String tableName = table.getTableName().getString();
-            String colName = isLocalIndexColumnRef ? IndexUtil.getIndexColumnName(sourceColumn) : sourceColumn.getName().getString();
-            String fullName = getProjectedColumnName(schemaName, tableName, colName);
-            String aliasedName = tableRef.getTableAlias() == null ? fullName : getProjectedColumnName(null, tableRef.getTableAlias(), colName);
-
-            columnNameMap.put(colName, aliasedName);
-            if (!fullName.equals(aliasedName)) {
-                columnNameMap.put(fullName, aliasedName);
-            }
-
-            PName name = PNameFactory.newName(aliasedName);
-            PColumnImpl column = new PColumnImpl(name, familyName, sourceColumn.getDataType(),
-                    sourceColumn.getMaxLength(), sourceColumn.getScale(), sourceColumn.isNullable(),
-                    position, sourceColumn.getSortOrder(), sourceColumn.getArraySize(), sourceColumn.getViewConstant(), sourceColumn.isViewReferenced(), sourceColumn.getExpressionStr());
-            Expression sourceExpression = isLocalIndexColumnRef ?
-                      NODE_FACTORY.column(TableName.create(schemaName, tableName), "\"" + colName + "\"", null).accept(new ExpressionCompiler(context))
-                    : new ColumnRef(tableRef, sourceColumn.getPosition()).newColumnExpression();
-            projectedColumns.add(column);
-            sourceExpressions.add(sourceExpression);
+            return TupleProjectionCompiler.createProjectedTable(tableRef, sourceColumns, retainPKColumns);
         }
 
-        public ProjectedPTableWrapper createProjectedTable(RowProjector rowProjector) throws SQLException {
+        public PTable createProjectedTable(RowProjector rowProjector) throws SQLException {
             assert(isSubselect());
-            List<PColumn> projectedColumns = new ArrayList<PColumn>();
-            List<Expression> sourceExpressions = new ArrayList<Expression>();
-            ListMultimap<String, String> columnNameMap = ArrayListMultimap.<String, String>create();
+            TableRef tableRef = FromCompiler.getResolverForCompiledDerivedTable(statement.getConnection(), this.tableRef, rowProjector).getTables().get(0);
+            List<ColumnRef> sourceColumns = new ArrayList<ColumnRef>();
             PTable table = tableRef.getTable();
             for (PColumn column : table.getColumns()) {
-                String colName = getProjectedColumnName(null, tableRef.getTableAlias(), column.getName().getString());
-                Expression sourceExpression = rowProjector.getColumnProjector(column.getPosition()).getExpression();
-                PColumnImpl projectedColumn = new PColumnImpl(PNameFactory.newName(colName), PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY),
-                        sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
-                        column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr());
-                projectedColumns.add(projectedColumn);
-                sourceExpressions.add(sourceExpression);
+                sourceColumns.add(new ColumnRef(tableRef, column.getPosition()));
             }
-            PTable t = PTableImpl.makePTable(table.getTenantId(), PNameFactory.newName(PROJECTED_TABLE_SCHEMA), table.getName(), PTableType.JOIN,
-                        table.getIndexState(), table.getTimeStamp(), table.getSequenceNumber(), table.getPKName(),
-                        null, projectedColumns, table.getParentSchemaName(),
-                        table.getParentTableName(), table.getIndexes(), table.isImmutableRows(), Collections.<PName>emptyList(), null, null,
-                        table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
-                        table.getViewIndexId(), table.getIndexType());
-            return new ProjectedPTableWrapper(t, columnNameMap, sourceExpressions);
+            return TupleProjectionCompiler.createProjectedTable(tableRef, sourceColumns, false);
         }
     }
 
@@ -1120,7 +1062,6 @@ public class JoinCompiler {
         }
     }
 
-    private static final String PROJECTED_TABLE_SCHEMA = ".";
     // for creation of new statements
     private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory();
 
@@ -1215,7 +1156,7 @@ public class JoinCompiler {
         }
         JoinTable join = compile(statement, select, resolver);
         if (groupByTableRef != null || orderByTableRef != null) {
-            QueryCompiler compiler = new QueryCompiler(statement, select, resolver);
+            QueryCompiler compiler = new QueryCompiler(statement, select, resolver, false);
             List<Object> binds = statement.getParameters();
             StatementContext ctx = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));
             QueryPlan plan = compiler.compileJoinQuery(ctx, binds, join, false, false, null);
@@ -1329,147 +1270,34 @@ public class JoinCompiler {
         return NODE_FACTORY.select(from, hintNode, false, selectList, where, groupBy, null, orderBy, null, 0, groupBy != null, hasSequence);
     }
 
-    public class PTableWrapper {
-    	protected PTable table;
-    	protected ListMultimap<String, String> columnNameMap;
-
-    	protected PTableWrapper(PTable table, ListMultimap<String, String> columnNameMap) {
-    		this.table = table;
-    		this.columnNameMap = columnNameMap;
-    	}
-
-    	public PTable getTable() {
-    		return table;
-    	}
-
-    	public ListMultimap<String, String> getColumnNameMap() {
-    		return columnNameMap;
-    	}
-
-    	public List<String> getMappedColumnName(String name) {
-    		return columnNameMap.get(name);
-    	}
-
-        public ColumnResolver createColumnResolver() {
-            return new JoinedTableColumnResolver(this, origResolver);
-        }
-
-        public PTableWrapper mergeProjectedTables(PTableWrapper rWrapper, JoinType type) throws SQLException {
-            PTable left = this.getTable();
-            PTable right = rWrapper.getTable();
-            List<PColumn> merged = Lists.<PColumn> newArrayList();
-            if (type != JoinType.Full) {
-                merged.addAll(left.getColumns());
-            } else {
-                for (PColumn c : left.getColumns()) {
-                    if (SchemaUtil.isPKColumn(c)) {
-                        merged.add(c);
-                    } else {
-                        PColumnImpl column = new PColumnImpl(c.getName(), c.getFamilyName(), c.getDataType(),
-                                c.getMaxLength(), c.getScale(), true, c.getPosition(),
-                                c.getSortOrder(), c.getArraySize(), c.getViewConstant(), c.isViewReferenced(), c.getExpressionStr());
-                        merged.add(column);
-                    }
-                }
+    public static PTable joinProjectedTables(PTable left, PTable right, JoinType type) throws SQLException {
+        Preconditions.checkArgument(left.getType() == PTableType.PROJECTED);
+        Preconditions.checkArgument(right.getType() == PTableType.PROJECTED);
+        List<PColumn> merged = Lists.<PColumn> newArrayList();
+        if (type == JoinType.Full) {
+            for (PColumn c : left.getColumns()) {
+                merged.add(new ProjectedColumn(c.getName(), c.getFamilyName(),
+                        c.getPosition(), true, ((ProjectedColumn) c).getSourceColumnRef()));
             }
-            int position = merged.size();
-            for (PColumn c : right.getColumns()) {
-                if (!SchemaUtil.isPKColumn(c)) {
-                    PColumnImpl column = new PColumnImpl(c.getName(),
-                            PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY), c.getDataType(),
-                            c.getMaxLength(), c.getScale(), type == JoinType.Inner ? c.isNullable() : true, position++,
-                            c.getSortOrder(), c.getArraySize(), c.getViewConstant(), c.isViewReferenced(), c.getExpressionStr());
-                    merged.add(column);
-                }
-            }
-            if (left.getBucketNum() != null) {
-                merged.remove(0);
+        } else {
+            merged.addAll(left.getColumns());
+        }
+        int position = merged.size();
+        for (PColumn c : right.getColumns()) {
+            if (!SchemaUtil.isPKColumn(c)) {
+                PColumn column = new ProjectedColumn(c.getName(), c.getFamilyName(), 
+                        position++, type == JoinType.Inner ? c.isNullable() : true, 
+                        ((ProjectedColumn) c).getSourceColumnRef());
+                merged.add(column);
             }
-            PTable t = PTableImpl.makePTable(left.getTenantId(), left.getSchemaName(),
-                    PNameFactory.newName(SchemaUtil.getTableName(left.getName().getString(), right.getName().getString())), left.getType(), left.getIndexState(), left.getTimeStamp(), left.getSequenceNumber(), left.getPKName(), left.getBucketNum(), merged,
-                    left.getParentSchemaName(), left.getParentTableName(), left.getIndexes(), left.isImmutableRows(), Collections.<PName>emptyList(), null, null, PTable.DEFAULT_DISABLE_WAL, left.isMultiTenant(), left.getStoreNulls(), left.getViewType(), left.getViewIndexId(), left.getIndexType());
-
-            ListMultimap<String, String> mergedMap = ArrayListMultimap.<String, String>create();
-            mergedMap.putAll(this.getColumnNameMap());
-            mergedMap.putAll(rWrapper.getColumnNameMap());
-
-            return new PTableWrapper(t, mergedMap);
         }
-    }
-
-    public class ProjectedPTableWrapper extends PTableWrapper {
-    	private List<Expression> sourceExpressions;
-
-    	protected ProjectedPTableWrapper(PTable table, ListMultimap<String, String> columnNameMap, List<Expression> sourceExpressions) {
-    		super(table, columnNameMap);
-    		this.sourceExpressions = sourceExpressions;
-    	}
-
-    	public Expression getSourceExpression(PColumn column) {
-    		return sourceExpressions.get(column.getPosition() - (table.getBucketNum() == null ? 0 : 1));
-    	}
-
-        public TupleProjector createTupleProjector() {
-            return new TupleProjector(this);
+        if (left.getBucketNum() != null) {
+            merged.remove(0);
         }
-    }
-
-    public static class JoinedTableColumnResolver implements ColumnResolver {
-    	private PTableWrapper table;
-    	private ColumnResolver tableResolver;
-    	private TableRef tableRef;
-
-    	private JoinedTableColumnResolver(PTableWrapper table, ColumnResolver tableResolver) {
-    		this.table = table;
-    		this.tableResolver = tableResolver;
-            this.tableRef = new TableRef(ParseNodeFactory.createTempAlias(), table.getTable(), 0, false);
-    	}
-
-        public PTableWrapper getPTableWrapper() {
-            return table;
-        }
-
-        public TableRef getTableRef() {
-            return tableRef;
-        }
-
-		@Override
-		public List<TableRef> getTables() {
-			return tableResolver.getTables();
-		}
-
-        @Override
-        public TableRef resolveTable(String schemaName, String tableName)
-                throws SQLException {
-            return tableResolver.resolveTable(schemaName, tableName);
-        }
-
-		@Override
-		public ColumnRef resolveColumn(String schemaName, String tableName,
-				String colName) throws SQLException {
-			String name = getProjectedColumnName(schemaName, tableName, colName);
-			try {
-				PColumn column = tableRef.getTable().getColumn(name);
-				return new ColumnRef(tableRef, column.getPosition());
-			} catch (ColumnNotFoundException e) {
-				List<String> names = table.getMappedColumnName(name);
-				if (names.size() == 1) {
-					PColumn column = tableRef.getTable().getColumn(names.get(0));
-					return new ColumnRef(tableRef, column.getPosition());
-				}
-
-				if (names.size() > 1) {
-					throw new AmbiguousColumnException(name);
-				}
-
-				throw e;
-			}
-		}
-    }
-
-    private static String getProjectedColumnName(String schemaName, String tableName,
-			String colName) {
-    	return SchemaUtil.getColumnName(SchemaUtil.getTableName(schemaName, tableName), colName);
+        
+        return PTableImpl.makePTable(left.getTenantId(), left.getSchemaName(),
+                PNameFactory.newName(SchemaUtil.getTableName(left.getName().getString(), right.getName().getString())), left.getType(), left.getIndexState(), left.getTimeStamp(), left.getSequenceNumber(), left.getPKName(), left.getBucketNum(), merged,
+                left.getParentSchemaName(), left.getParentTableName(), left.getIndexes(), left.isImmutableRows(), Collections.<PName>emptyList(), null, null, PTable.DEFAULT_DISABLE_WAL, left.isMultiTenant(), left.getStoreNulls(), left.getViewType(), left.getViewIndexId(), left.getIndexType());
     }
 
 }

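Editor's note: joinProjectedTables replaces the old PTableWrapper merge. Left-hand columns are kept (forced nullable for FULL joins) and right-hand non-PK columns are appended with renumbered positions, nullable unless the join is inner. A simplified, runnable sketch of that merge; it omits the PK and salt-column handling and uses stand-in types:

    import java.util.ArrayList;
    import java.util.List;

    public class JoinProjectionSketch {
        enum JoinType { INNER, LEFT, FULL }

        // Stand-in for a projected column: name, position, nullability.
        static final class Col {
            final String name;
            final int position;
            final boolean nullable;
            Col(String name, int position, boolean nullable) {
                this.name = name; this.position = position; this.nullable = nullable;
            }
            @Override public String toString() {
                return name + "@" + position + (nullable ? "?" : "");
            }
        }

        static List<Col> join(List<Col> left, List<Col> right, JoinType type) {
            List<Col> merged = new ArrayList<>();
            for (Col c : left) {
                // FULL joins may leave left-side values absent, so widen to nullable.
                merged.add(type == JoinType.FULL ? new Col(c.name, c.position, true) : c);
            }
            int position = merged.size();
            for (Col c : right) {
                // Right side starts after the left columns; outer joins widen nullability.
                merged.add(new Col(c.name, position++, type == JoinType.INNER ? c.nullable : true));
            }
            return merged;
        }

        public static void main(String[] args) {
            List<Col> lhs = List.of(new Col("L.ID", 0, false));
            List<Col> rhs = List.of(new Col("R.V", 0, false));
            System.out.println(join(lhs, rhs, JoinType.LEFT)); // [L.ID@0, R.V@1?]
        }
    }
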
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
index 444b05e..215f59e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
@@ -123,7 +123,7 @@ public class OrderByCompiler {
                 // REV_ROW_KEY_ORDER_BY scan would not take effect for a projected table, so don't return it for such table types.
                 if (context.getConnection().getQueryServices().getProps().getBoolean(QueryServices.USE_REVERSE_SCAN_ATTRIB, QueryServicesOptions.DEFAULT_USE_REVERSE_SCAN)
                         && !context.getScanRanges().useSkipScanFilter()
-                        && context.getCurrentTable().getTable().getType() != PTableType.JOIN
+                        && context.getCurrentTable().getTable().getType() != PTableType.PROJECTED
                         && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY) {
                     return OrderBy.REV_ROW_KEY_ORDER_BY;
                 }

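Editor's note: the reverse-scan shortcut is now gated off for PROJECTED (formerly JOIN) and SUBQUERY table types, where, as the surrounding comment notes, a reverse scan would not take effect. The idea being gated: when the requested ordering is exactly the reverse of the row key order, the scan can run backwards instead of buffering and sorting. A toy illustration of that trade, not Phoenix code:

    import java.util.List;
    import java.util.NavigableSet;
    import java.util.TreeSet;

    public class ReverseScanSketch {
        // If ORDER BY matches the reversed key order, iterate the key-sorted
        // store in reverse instead of materializing and sorting the rows.
        static Iterable<Integer> scan(NavigableSet<Integer> store, boolean reverseKeyOrder) {
            return reverseKeyOrder ? store.descendingSet() : store;
        }

        public static void main(String[] args) {
            NavigableSet<Integer> store = new TreeSet<>(List.of(1, 2, 3));
            System.out.println(scan(store, true)); // [3, 2, 1], no sort step
        }
    }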

[43/50] [abbrv] phoenix git commit: PHOENIX-1489 Access column values positionally from client

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index 27fe0f9..e84ca2a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -62,6 +62,7 @@ import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.parse.TableWildcardParseNode;
 import org.apache.phoenix.parse.WildcardParseNode;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.ArgumentTypeMismatchException;
 import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
@@ -144,12 +145,21 @@ public class ProjectionCompiler {
             }
             ColumnRef ref = new ColumnRef(tableRef,i);
             String colName = ref.getColumn().getName().getString();
+            String tableAlias = tableRef.getTableAlias();
             if (resolveColumn) {
-                if (tableRef.getTableAlias() != null) {
-                    ref = resolver.resolveColumn(null, tableRef.getTableAlias(), colName);
-                } else {
-                    String schemaName = table.getSchemaName().getString();
-                    ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, table.getTableName().getString(), colName);
+                try {
+                    if (tableAlias != null) {
+                        ref = resolver.resolveColumn(null, tableAlias, colName);
+                    } else {
+                        String schemaName = table.getSchemaName().getString();
+                        ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, table.getTableName().getString(), colName);
+                    }
+                } catch (AmbiguousColumnException e) {
+                    if (column.getFamilyName() != null) {
+                        ref = resolver.resolveColumn(tableAlias != null ? tableAlias : table.getTableName().getString(), column.getFamilyName().getString(), colName);
+                    } else {
+                        throw e;
+                    }
                 }
             }
             Expression expression = ref.newColumnExpression();
@@ -219,12 +229,21 @@ public class ProjectionCompiler {
                 }
             }
             String colName = tableColumn.getName().getString();
+            String tableAlias = tableRef.getTableAlias();
             if (resolveColumn) {
-                if (tableRef.getTableAlias() != null) {
-                    ref = resolver.resolveColumn(null, tableRef.getTableAlias(), indexColName);
-                } else {
-                    String schemaName = index.getSchemaName().getString();
-                    ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, index.getTableName().getString(), indexColName);
+                try {
+                    if (tableAlias != null) {
+                        ref = resolver.resolveColumn(null, tableAlias, indexColName);
+                    } else {
+                        String schemaName = index.getSchemaName().getString();
+                        ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, index.getTableName().getString(), indexColName);
+                    }
+                } catch (AmbiguousColumnException e) {
+                    if (indexColumn.getFamilyName() != null) {
+                        ref = resolver.resolveColumn(tableAlias != null ? tableAlias : index.getTableName().getString(), indexColumn.getFamilyName().getString(), indexColName);
+                    } else {
+                        throw e;
+                    }
                 }
             }
             Expression expression = ref.newColumnExpression();
@@ -238,11 +257,14 @@ public class ProjectionCompiler {
         }
     }
     
-    private static void projectTableColumnFamily(StatementContext context, String cfName, TableRef tableRef, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
+    private static void projectTableColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
         PTable table = tableRef.getTable();
         PColumnFamily pfamily = table.getColumnFamily(cfName);
         for (PColumn column : pfamily.getColumns()) {
             ColumnRef ref = new ColumnRef(tableRef, column.getPosition());
+            if (resolveColumn) {
+                ref = context.getResolver().resolveColumn(table.getTableName().getString(), cfName, column.getName().getString());
+            }
             Expression expression = ref.newColumnExpression();
             projectedExpressions.add(expression);
             String colName = column.getName().toString();
@@ -252,7 +274,7 @@ public class ProjectionCompiler {
         }
     }
 
-    private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
+    private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
         PTable index = tableRef.getTable();
         PhoenixConnection conn = context.getConnection();
         String tableName = index.getParentName().getString();
@@ -277,6 +299,9 @@ public class ProjectionCompiler {
                     throw e;
                 }
             }
+            if (resolveColumn) {
+                ref = context.getResolver().resolveColumn(index.getTableName().getString(), indexColumn.getFamilyName() == null ? null : indexColumn.getFamilyName().getString(), indexColName);
+            }
             Expression expression = ref.newColumnExpression();
             projectedExpressions.add(expression);
             String colName = column.getName().toString();
@@ -322,6 +347,7 @@ public class ProjectionCompiler {
         ColumnResolver resolver = context.getResolver();
         TableRef tableRef = context.getCurrentTable();
         PTable table = tableRef.getTable();
+        boolean resolveColumn = !tableRef.equals(resolver.getTables().get(0));
         boolean isWildcard = false;
         Scan scan = context.getScan();
         int index = 0;
@@ -336,9 +362,9 @@ public class ProjectionCompiler {
                 }
                 isWildcard = true;
                 if (tableRef.getTable().getType() == PTableType.INDEX && ((WildcardParseNode)node).isRewrite()) {
-                	projectAllIndexColumns(context, tableRef, false, projectedExpressions, projectedColumns, targetColumns);
+                	projectAllIndexColumns(context, tableRef, resolveColumn, projectedExpressions, projectedColumns, targetColumns);
                 } else {
-                    projectAllTableColumns(context, tableRef, false, projectedExpressions, projectedColumns, targetColumns);
+                    projectAllTableColumns(context, tableRef, resolveColumn, projectedExpressions, projectedColumns, targetColumns);
                 }
             } else if (node instanceof TableWildcardParseNode) {
                 TableName tName = ((TableWildcardParseNode) node).getTableName();
@@ -362,9 +388,9 @@ public class ProjectionCompiler {
                 // columns are projected (which is currently true, but could change).
                 projectedFamilies.add(Bytes.toBytes(cfName));
                 if (tableRef.getTable().getType() == PTableType.INDEX && ((FamilyWildcardParseNode)node).isRewrite()) {
-                    projectIndexColumnFamily(context, cfName, tableRef, projectedExpressions, projectedColumns);
+                    projectIndexColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, projectedColumns);
                 } else {
-                    projectTableColumnFamily(context, cfName, tableRef, projectedExpressions, projectedColumns);
+                    projectTableColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, projectedColumns);
                 }
             } else {
                 Expression expression = node.accept(selectVisitor);

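Editor's note: the new try/catch lets a column name that is ambiguous across joined tables still resolve when it is family-qualified: the table name (or alias) moves into the schema slot and the column family takes the table slot. A self-contained sketch of that two-step fallback with stand-in types; it simplifies the alias-versus-schema handling of the real code:

    public class ResolveFallbackSketch {
        static class AmbiguousColumnException extends Exception {}

        interface Resolver {
            String resolve(String schema, String table, String col) throws AmbiguousColumnException;
        }

        // Mirrors the ProjectionCompiler change: resolve by alias/table first;
        // on ambiguity, retry with table-as-schema and family-as-table.
        static String resolveColumn(Resolver r, String tableAlias, String tableName,
                                    String family, String col) throws AmbiguousColumnException {
            try {
                return r.resolve(null, tableAlias != null ? tableAlias : tableName, col);
            } catch (AmbiguousColumnException e) {
                if (family == null) throw e;
                return r.resolve(tableAlias != null ? tableAlias : tableName, family, col);
            }
        }

        public static void main(String[] args) throws AmbiguousColumnException {
            Resolver toy = (schema, table, col) -> {
                if (schema == null) throw new AmbiguousColumnException(); // bare name is ambiguous
                return schema + "." + table + "." + col;
            };
            System.out.println(resolveColumn(toy, null, "T", "CF", "K1")); // T.CF.K1
        }
    }
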
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index 137f4e9..2276f4e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -28,9 +28,6 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.JoinCompiler.JoinSpec;
 import org.apache.phoenix.compile.JoinCompiler.JoinTable;
-import org.apache.phoenix.compile.JoinCompiler.JoinedTableColumnResolver;
-import org.apache.phoenix.compile.JoinCompiler.PTableWrapper;
-import org.apache.phoenix.compile.JoinCompiler.ProjectedPTableWrapper;
 import org.apache.phoenix.compile.JoinCompiler.Table;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.execute.AggregatePlan;
@@ -100,14 +97,23 @@ public class QueryCompiler {
     private final List<? extends PDatum> targetColumns;
     private final ParallelIteratorFactory parallelIteratorFactory;
     private final SequenceManager sequenceManager;
+    private final boolean projectTuples;
     private final boolean useSortMergeJoin;
     private final boolean noChildParentJoinOptimization;
 
     public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver) throws SQLException {
-        this(statement, select, resolver, Collections.<PDatum>emptyList(), null, new SequenceManager(statement));
+        this(statement, select, resolver, Collections.<PDatum>emptyList(), null, new SequenceManager(statement), true);
+    }
+
+    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, boolean projectTuples) throws SQLException {
+        this(statement, select, resolver, Collections.<PDatum>emptyList(), null, new SequenceManager(statement), projectTuples);
     }
 
     public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager) throws SQLException {
+        this(statement, select, resolver, targetColumns, parallelIteratorFactory, sequenceManager, true);
+    }
+    
+    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples) throws SQLException {
         this.statement = statement;
         this.select = select;
         this.resolver = resolver;
@@ -115,6 +121,7 @@ public class QueryCompiler {
         this.targetColumns = targetColumns;
         this.parallelIteratorFactory = parallelIteratorFactory;
         this.sequenceManager = sequenceManager;
+        this.projectTuples = projectTuples;
         this.useSortMergeJoin = select.getHint().hasHint(Hint.USE_SORT_MERGE_JOIN);
         this.noChildParentJoinOptimization = select.getHint().hasHint(Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION);
         if (statement.getConnection().getQueryServices().getLowestClusterHBaseVersion() >= PhoenixDatabaseMetaData.ESSENTIAL_FAMILY_VERSION_THRESHOLD) {
@@ -194,30 +201,32 @@ public class QueryCompiler {
             SelectStatement subquery = table.getAsSubquery(orderBy);
             if (!table.isSubselect()) {
                 context.setCurrentTable(table.getTableRef());
-                ProjectedPTableWrapper projectedTable = table.createProjectedTable(!projectPKColumns, context);
-                TupleProjector.serializeProjectorIntoScan(context.getScan(), projectedTable.createTupleProjector());
-                context.setResolver(projectedTable.createColumnResolver());
+                PTable projectedTable = table.createProjectedTable(!projectPKColumns, context);
+                TupleProjector.serializeProjectorIntoScan(context.getScan(), new TupleProjector(projectedTable));
+                context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable));
                 table.projectColumns(context.getScan());
                 return compileSingleQuery(context, subquery, binds, asSubquery, !asSubquery);
             }
             QueryPlan plan = compileSubquery(subquery);
-            ProjectedPTableWrapper projectedTable = table.createProjectedTable(plan.getProjector());
-            context.setResolver(projectedTable.createColumnResolver());
-            return new TupleProjectionPlan(plan, projectedTable.createTupleProjector(), table.compilePostFilterExpression(context));
+            PTable projectedTable = table.createProjectedTable(plan.getProjector());
+            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable));
+            return new TupleProjectionPlan(plan, new TupleProjector(plan.getProjector()), table.compilePostFilterExpression(context));
         }
 
         boolean[] starJoinVector;
         if (!this.useSortMergeJoin && (starJoinVector = joinTable.getStarJoinVector()) != null) {
             Table table = joinTable.getTable();
-            ProjectedPTableWrapper initialProjectedTable;
+            PTable initialProjectedTable;
             TableRef tableRef;
             SelectStatement query;
+            TupleProjector tupleProjector;
             if (!table.isSubselect()) {
                 context.setCurrentTable(table.getTableRef());
                 initialProjectedTable = table.createProjectedTable(!projectPKColumns, context);
                 tableRef = table.getTableRef();
                 table.projectColumns(context.getScan());
                 query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery);
+                tupleProjector = new TupleProjector(initialProjectedTable);
             } else {
                 SelectStatement subquery = table.getAsSubquery(orderBy);
                 QueryPlan plan = compileSubquery(subquery);
@@ -225,9 +234,10 @@ public class QueryCompiler {
                 tableRef = plan.getTableRef();
                 context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
                 query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
+                tupleProjector = new TupleProjector(plan.getProjector());
             }
             context.setCurrentTable(tableRef);
-            PTableWrapper projectedTable = initialProjectedTable;
+            PTable projectedTable = initialProjectedTable;
             int count = joinSpecs.size();
             ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count];
             List<Expression>[] joinExpressions = new List[count];
@@ -235,9 +245,7 @@ public class QueryCompiler {
             PTable[] tables = new PTable[count];
             int[] fieldPositions = new int[count];
             HashSubPlan[] subPlans = new HashSubPlan[count];
-            fieldPositions[0] = projectedTable.getTable().getColumns().size() - projectedTable.getTable().getPKColumns().size();
-            boolean forceProjection = table.isSubselect();
-            boolean needsProject = forceProjection || asSubquery;
+            fieldPositions[0] = projectedTable.getColumns().size() - projectedTable.getPKColumns().size();
             for (int i = 0; i < count; i++) {
                 JoinSpec joinSpec = joinSpecs.get(i);
                 Scan subScan = ScanUtil.newScan(originalScan);
@@ -245,17 +253,12 @@ public class QueryCompiler {
                 QueryPlan joinPlan = compileJoinQuery(subContext, binds, joinSpec.getJoinTable(), true, true, null);
                 boolean hasPostReference = joinSpec.getJoinTable().hasPostReference();
                 if (hasPostReference) {
-                    PTableWrapper subProjTable = ((JoinedTableColumnResolver) subContext.getResolver()).getPTableWrapper();
-                    tables[i] = subProjTable.getTable();
-                    projectedTable = projectedTable.mergeProjectedTables(subProjTable, joinSpec.getType());
-                    needsProject = true;
+                    tables[i] = subContext.getResolver().getTables().get(0).getTable();
+                    projectedTable = JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType());
                 } else {
                     tables[i] = null;
                 }
-                if (!starJoinVector[i]) {
-                    needsProject = true;
-                }
-                context.setResolver((!forceProjection && starJoinVector[i]) ? joinTable.getOriginalResolver() : projectedTable.createColumnResolver());
+                context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable));
                 joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder
                 Pair<List<Expression>, List<Expression>> joinConditions = joinSpec.compileJoinConditions(context, subContext, true);
                 joinExpressions[i] = joinConditions.getFirst();
@@ -270,17 +273,14 @@ public class QueryCompiler {
                 }
                 subPlans[i] = new HashSubPlan(i, joinPlan, optimized ? null : hashExpressions, joinSpec.isSingleValueOnly(), keyRangeLhsExpression, keyRangeRhsExpression);
             }
-            if (needsProject) {
-                TupleProjector.serializeProjectorIntoScan(context.getScan(), initialProjectedTable.createTupleProjector());
-            }
-            context.setResolver(needsProject ? projectedTable.createColumnResolver() : joinTable.getOriginalResolver());
+            TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
             QueryPlan plan = compileSingleQuery(context, query, binds, asSubquery, !asSubquery && joinTable.isAllLeftJoin());
             Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, table);
             Integer limit = null;
             if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
                 limit = plan.getLimit();
             }
-            HashJoinInfo joinInfo = new HashJoinInfo(projectedTable.getTable(), joinIds, joinExpressions, joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, limit, forceProjection);
+            HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, limit);
             return HashJoinPlan.create(joinTable.getStatement(), plan, joinInfo, subPlans);
         }
 
@@ -296,16 +296,17 @@ public class QueryCompiler {
             Scan subScan = ScanUtil.newScan(originalScan);
             StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
             QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, true, null);
-            PTableWrapper lhsProjTable = ((JoinedTableColumnResolver) lhsCtx.getResolver()).getPTableWrapper();
-            ProjectedPTableWrapper rhsProjTable;
+            PTable rhsProjTable;
             TableRef rhsTableRef;
             SelectStatement rhs;
+            TupleProjector tupleProjector;
             if (!rhsTable.isSubselect()) {
                 context.setCurrentTable(rhsTable.getTableRef());
                 rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context);
                 rhsTableRef = rhsTable.getTableRef();
                 rhsTable.projectColumns(context.getScan());
                 rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery);
+                tupleProjector = new TupleProjector(rhsProjTable);
             } else {
                 SelectStatement subquery = rhsTable.getAsSubquery(orderBy);
                 QueryPlan plan = compileSubquery(subquery);
@@ -313,30 +314,27 @@ public class QueryCompiler {
                 rhsTableRef = plan.getTableRef();
                 context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
                 rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
+                tupleProjector = new TupleProjector(plan.getProjector());
             }
             context.setCurrentTable(rhsTableRef);
-            boolean forceProjection = rhsTable.isSubselect();
-            context.setResolver(forceProjection ? rhsProjTable.createColumnResolver() : joinTable.getOriginalResolver());
+            context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable));
             ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[] {new ImmutableBytesPtr(emptyByteArray)};
             Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(lhsCtx, context, true);
             List<Expression> joinExpressions = joinConditions.getSecond();
             List<Expression> hashExpressions = joinConditions.getFirst();
             boolean needsMerge = lhsJoin.hasPostReference();
-            boolean needsProject = forceProjection || asSubquery || needsMerge;
-            PTable lhsTable = needsMerge ? lhsProjTable.getTable() : null;
-            int fieldPosition = needsMerge ? rhsProjTable.getTable().getColumns().size() - rhsProjTable.getTable().getPKColumns().size() : 0;
-            PTableWrapper projectedTable = needsMerge ? rhsProjTable.mergeProjectedTables(lhsProjTable, type == JoinType.Right ? JoinType.Left : type) : rhsProjTable;
-            if (needsProject) {
-                TupleProjector.serializeProjectorIntoScan(context.getScan(), rhsProjTable.createTupleProjector());
-            }
-            context.setResolver(needsProject ? projectedTable.createColumnResolver() : joinTable.getOriginalResolver());
+            PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null;
+            int fieldPosition = needsMerge ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0;
+            PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable, type == JoinType.Right ? JoinType.Left : type) : rhsProjTable;
+            TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
+            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable));
             QueryPlan rhsPlan = compileSingleQuery(context, rhs, binds, asSubquery, !asSubquery && type == JoinType.Right);
             Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, rhsTable);
             Integer limit = null;
             if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
                 limit = rhsPlan.getLimit();
             }
-            HashJoinInfo joinInfo = new HashJoinInfo(projectedTable.getTable(), joinIds, new List[] {joinExpressions}, new JoinType[] {type == JoinType.Right ? JoinType.Left : type}, new boolean[] {true}, new PTable[] {lhsTable}, new int[] {fieldPosition}, postJoinFilterExpression, limit, forceProjection);
+            HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, new List[] {joinExpressions}, new JoinType[] {type == JoinType.Right ? JoinType.Left : type}, new boolean[] {true}, new PTable[] {lhsTable}, new int[] {fieldPosition}, postJoinFilterExpression, limit);
             Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
             getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), rhsTableRef, type, joinExpressions, hashExpressions);
             return HashJoinPlan.create(joinTable.getStatement(), rhsPlan, joinInfo, new HashSubPlan[] {new HashSubPlan(0, lhsPlan, hashExpressions, false, keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond())});
@@ -362,28 +360,27 @@ public class QueryCompiler {
         StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), lhsScan, new SequenceManager(statement));
         boolean preserveRowkey = !projectPKColumns && type != JoinType.Full;
         QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, !preserveRowkey, lhsOrderBy);
-        PTableWrapper lhsProjTable = ((JoinedTableColumnResolver) lhsCtx.getResolver()).getPTableWrapper();
+        PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable();
         boolean isInRowKeyOrder = preserveRowkey && lhsPlan.getOrderBy().getOrderByExpressions().isEmpty();
         
         Scan rhsScan = ScanUtil.newScan(originalScan);
         StatementContext rhsCtx = new StatementContext(statement, context.getResolver(), rhsScan, new SequenceManager(statement));
         QueryPlan rhsPlan = compileJoinQuery(rhsCtx, binds, rhsJoin, true, true, rhsOrderBy);
-        PTableWrapper rhsProjTable = ((JoinedTableColumnResolver) rhsCtx.getResolver()).getPTableWrapper();
+        PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable();
         
         Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(type == JoinType.Right ? rhsCtx : lhsCtx, type == JoinType.Right ? lhsCtx : rhsCtx, false);
         List<Expression> lhsKeyExpressions = type == JoinType.Right ? joinConditions.getSecond() : joinConditions.getFirst();
         List<Expression> rhsKeyExpressions = type == JoinType.Right ? joinConditions.getFirst() : joinConditions.getSecond();
         
         boolean needsMerge = rhsJoin.hasPostReference();
-        PTable rhsTable = needsMerge ? rhsProjTable.getTable() : null;
-        int fieldPosition = needsMerge ? lhsProjTable.getTable().getColumns().size() - lhsProjTable.getTable().getPKColumns().size() : 0;
-        PTableWrapper projectedTable = needsMerge ? lhsProjTable.mergeProjectedTables(rhsProjTable, type == JoinType.Right ? JoinType.Left : type) : lhsProjTable;
+        int fieldPosition = needsMerge ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0;
+        PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable, type == JoinType.Right ? JoinType.Left : type) : lhsProjTable;
 
-        ColumnResolver resolver = projectedTable.createColumnResolver();
-        TableRef tableRef = ((JoinedTableColumnResolver) resolver).getTableRef();
+        ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable);
+        TableRef tableRef = resolver.getTables().get(0);
         StatementContext subCtx = new StatementContext(statement, resolver, ScanUtil.newScan(originalScan), new SequenceManager(statement));
         subCtx.setCurrentTable(tableRef);
-        QueryPlan innerPlan = new SortMergeJoinPlan(subCtx, joinTable.getStatement(), tableRef, type == JoinType.Right ? JoinType.Left : type, lhsPlan, rhsPlan, lhsKeyExpressions, rhsKeyExpressions, projectedTable.getTable(), lhsProjTable.getTable(), rhsTable, fieldPosition, lastJoinSpec.isSingleValueOnly());
+        QueryPlan innerPlan = new SortMergeJoinPlan(subCtx, joinTable.getStatement(), tableRef, type == JoinType.Right ? JoinType.Left : type, lhsPlan, rhsPlan, lhsKeyExpressions, rhsKeyExpressions, projectedTable, lhsProjTable, needsMerge ? rhsProjTable : null, fieldPosition, lastJoinSpec.isSingleValueOnly());
         context.setCurrentTable(tableRef);
         context.setResolver(resolver);
         TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), tableRef.getTable().getTableName().getString()));
@@ -440,7 +437,7 @@ public class QueryCompiler {
         }
         int maxRows = this.statement.getMaxRows();
         this.statement.setMaxRows(0); // overwrite maxRows to avoid its impact on inner queries.
-        QueryPlan plan = new QueryCompiler(this.statement, subquery, resolver).compile();
+        QueryPlan plan = new QueryCompiler(this.statement, subquery, resolver, false).compile();
         plan = statement.getConnection().getQueryServices().getOptimizer().optimize(statement, plan);
         this.statement.setMaxRows(maxRows); // restore maxRows.
         return plan;
@@ -467,7 +464,14 @@ public class QueryCompiler {
     }
 
     protected QueryPlan compileSingleFlatQuery(StatementContext context, SelectStatement select, List<Object> binds, boolean asSubquery, boolean allowPageFilter, QueryPlan innerPlan, TupleProjector innerPlanTupleProjector, boolean isInRowKeyOrder) throws SQLException{
-        PhoenixConnection connection = statement.getConnection();
+        PTable projectedTable = null;
+        if (this.projectTuples) {
+            projectedTable = TupleProjectionCompiler.createProjectedTable(select, context);
+            if (projectedTable != null) {
+                context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable));
+            }
+        }
+        
         ColumnResolver resolver = context.getResolver();
         TableRef tableRef = context.getCurrentTable();
         PTable table = tableRef.getTable();
@@ -485,15 +489,14 @@ public class QueryCompiler {
         Expression having = HavingCompiler.compile(context, select, groupBy);
         // Don't pass groupBy when building where clause expression, because we do not want to wrap these
         // expressions as group by key expressions since they're pre, not post filtered.
-        if (innerPlan == null) {
-            context.setResolver(FromCompiler.getResolverForQuery(select, connection));
+        if (innerPlan == null && !tableRef.equals(resolver.getTables().get(0))) {
+            context.setResolver(FromCompiler.getResolverForQuery(select, this.statement.getConnection()));
         }
         Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode> newHashSet();
         Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries);
         context.setResolver(resolver); // recover resolver
         OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit, isInRowKeyOrder); 
         RowProjector projector = ProjectionCompiler.compile(context, select, groupBy, asSubquery ? Collections.<PDatum>emptyList() : targetColumns);
-
         // Final step is to build the query plan
         if (!asSubquery) {
             int maxRows = statement.getMaxRows();
@@ -506,6 +509,10 @@ public class QueryCompiler {
             }
         }
 
+        if (projectedTable != null) {
+            TupleProjector.serializeProjectorIntoScan(context.getScan(), new TupleProjector(projectedTable));
+        }
+        
         QueryPlan plan = innerPlan;
         if (plan == null) {
             ParallelIteratorFactory parallelIteratorFactory = asSubquery ? null : this.parallelIteratorFactory;

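The QueryCompiler changes above lean on one recurring pattern: the client serializes a tuple projector into the Scan as an attribute, and the region-server coprocessor deserializes and applies it. A minimal sketch of that round trip, with a hypothetical attribute name and a trivial string-array payload standing in for Phoenix's actual TupleProjector wire format:

    import java.io.*;
    import org.apache.hadoop.hbase.client.Scan;

    public class ScanAttributeRoundTrip {
        // Hypothetical attribute key; Phoenix defines its own constants.
        private static final String PROJECTOR_ATTR = "x_projector";

        // Client side: serialize a descriptor into the Scan.
        public static void serializeIntoScan(Scan scan, String[] projectedColumns) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            out.writeInt(projectedColumns.length);
            for (String col : projectedColumns) {
                out.writeUTF(col);
            }
            scan.setAttribute(PROJECTOR_ATTR, bytes.toByteArray());
        }

        // Server side: reconstruct the descriptor, or null if none was attached.
        public static String[] deserializeFromScan(Scan scan) throws IOException {
            byte[] payload = scan.getAttribute(PROJECTOR_ATTR);
            if (payload == null) {
                return null; // no projection requested for this scan
            }
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));
            String[] cols = new String[in.readInt()];
            for (int i = 0; i < cols.length; i++) {
                cols[i] = in.readUTF();
            }
            return cols;
        }
    }

Phoenix's real serializeProjectorIntoScan/deserializeProjectorFromScan carry expressions and a KeyValueSchema rather than column names, but the transport is the same Scan attribute.
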
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
new file mode 100644
index 0000000..72e2a26
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.phoenix.execute.TupleProjector;
+import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.ColumnParseNode;
+import org.apache.phoenix.parse.FamilyWildcardParseNode;
+import org.apache.phoenix.parse.OrderByNode;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor;
+import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.parse.WildcardParseNode;
+import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.LocalIndexDataColumnRef;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+
+import com.google.common.base.Preconditions;
+
+public class TupleProjectionCompiler {
+    public static final PName PROJECTED_TABLE_SCHEMA = PNameFactory.newName(".");
+    private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory();
+    
+    public static PTable createProjectedTable(SelectStatement select, StatementContext context) throws SQLException {
+        Preconditions.checkArgument(!select.isJoin());
+        // Non-group-by or group-by aggregations create their own projected results.
+        if (select.getInnerSelectStatement() != null 
+                || select.isAggregate() 
+                || select.isDistinct()
+                || (context.getResolver().getTables().get(0).getTable().getType() != PTableType.TABLE
+                && context.getResolver().getTables().get(0).getTable().getType() != PTableType.INDEX && context.getResolver().getTables().get(0).getTable().getType() != PTableType.VIEW))
+            return null;
+        
+        List<PColumn> projectedColumns = new ArrayList<PColumn>();
+        boolean isWildcard = false;
+        Set<String> families = new HashSet<String>();
+        ColumnRefVisitor visitor = new ColumnRefVisitor(context);
+        TableRef tableRef = context.getCurrentTable();
+        PTable table = tableRef.getTable();
+
+        for (AliasedNode aliasedNode : select.getSelect()) {
+            ParseNode node = aliasedNode.getNode();
+            if (node instanceof WildcardParseNode) {
+                if (((WildcardParseNode) node).isRewrite()) {
+                    TableRef parentTableRef = FromCompiler.getResolver(
+                            NODE_FACTORY.namedTable(null, TableName.create(table.getSchemaName().getString(), 
+                                    table.getParentTableName().getString())), context.getConnection()).resolveTable(
+                            table.getSchemaName().getString(),
+                            table.getParentTableName().getString());
+                    for (PColumn column : parentTableRef.getTable().getColumns()) {
+                        NODE_FACTORY.column(null, '"' + IndexUtil.getIndexColumnName(column) + '"', null).accept(visitor);
+                    }
+                }
+                isWildcard = true;
+            } else if (node instanceof FamilyWildcardParseNode) {
+                FamilyWildcardParseNode familyWildcardNode = (FamilyWildcardParseNode) node;
+                String familyName = familyWildcardNode.getName();
+                if (familyWildcardNode.isRewrite()) {
+                    TableRef parentTableRef = FromCompiler.getResolver(
+                            NODE_FACTORY.namedTable(null, TableName.create(table.getSchemaName().getString(), 
+                                    table.getParentTableName().getString())), context.getConnection()).resolveTable(
+                            table.getSchemaName().getString(),
+                            table.getParentTableName().getString());
+                    for (PColumn column : parentTableRef.getTable().getColumnFamily(familyName).getColumns()) {
+                        NODE_FACTORY.column(null, '"' + IndexUtil.getIndexColumnName(column) + '"', null).accept(visitor);
+                    }
+                }
+                families.add(familyName);
+            } else {
+                node.accept(visitor);
+            }
+        }
+        if (!isWildcard) {
+            for (OrderByNode orderBy : select.getOrderBy()) {
+                orderBy.getNode().accept(visitor);
+            }
+        }
+
+        boolean hasSaltingColumn = table.getBucketNum() != null;
+        int position = hasSaltingColumn ? 1 : 0;
+        // Always project PK columns first, in case PK columns were added later via ALTER TABLE.
+        for (int i = position; i < table.getPKColumns().size(); i++) {
+            PColumn sourceColumn = table.getPKColumns().get(i);
+            ColumnRef sourceColumnRef = new ColumnRef(tableRef, sourceColumn.getPosition());
+            PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(), 
+                    position++, sourceColumn.isNullable(), sourceColumnRef);
+            projectedColumns.add(column);
+        }
+        for (PColumn sourceColumn : table.getColumns()) {
+            if (SchemaUtil.isPKColumn(sourceColumn))
+                continue;
+            ColumnRef sourceColumnRef = new ColumnRef(tableRef, sourceColumn.getPosition());
+            if (!isWildcard 
+                    && !visitor.columnRefSet.contains(sourceColumnRef)
+                    && !families.contains(sourceColumn.getFamilyName().getString()))
+                continue;
+            PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(), 
+                    position++, sourceColumn.isNullable(), sourceColumnRef);
+            projectedColumns.add(column);
+            // Wildcard or FamilyWildcard will be handled by ProjectionCompiler.
+            if (!isWildcard && !families.contains(sourceColumn.getFamilyName().getString())) {
+                context.getScan().addColumn(sourceColumn.getFamilyName().getBytes(), sourceColumn.getName().getBytes());
+            }
+        }
+        // add LocalIndexDataColumnRef
+        for (LocalIndexDataColumnRef sourceColumnRef : visitor.localIndexColumnRefSet) {
+            PColumn column = new ProjectedColumn(sourceColumnRef.getColumn().getName(), 
+                    sourceColumnRef.getColumn().getFamilyName(), position++, 
+                    sourceColumnRef.getColumn().isNullable(), sourceColumnRef);
+            projectedColumns.add(column);
+        }
+        
+        return PTableImpl.makePTable(table.getTenantId(), table.getSchemaName(), table.getName(), PTableType.PROJECTED,
+                table.getIndexState(), table.getTimeStamp(), table.getSequenceNumber(), table.getPKName(),
+                table.getBucketNum(), projectedColumns, table.getParentSchemaName(),
+                table.getParentName(), table.getIndexes(), table.isImmutableRows(), Collections.<PName>emptyList(), null, null,
+                table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(),
+                table.getIndexType());
+    }
+
+    public static PTable createProjectedTable(TableRef tableRef, List<ColumnRef> sourceColumnRefs, boolean retainPKColumns) throws SQLException {
+        PTable table = tableRef.getTable();
+        boolean hasSaltingColumn = retainPKColumns && table.getBucketNum() != null;
+        List<PColumn> projectedColumns = new ArrayList<PColumn>();
+        int position = hasSaltingColumn ? 1 : 0;
+        for (int i = position; i < sourceColumnRefs.size(); i++) {
+            ColumnRef sourceColumnRef = sourceColumnRefs.get(i);
+            PColumn sourceColumn = sourceColumnRef.getColumn();
+            String colName = sourceColumn.getName().getString();
+            String aliasedName = tableRef.getTableAlias() == null ? 
+                      SchemaUtil.getColumnName(table.getName().getString(), colName) 
+                    : SchemaUtil.getColumnName(tableRef.getTableAlias(), colName);
+
+            PColumn column = new ProjectedColumn(PNameFactory.newName(aliasedName), 
+                    retainPKColumns && SchemaUtil.isPKColumn(sourceColumn) ? 
+                            null : PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY), 
+                    position++, sourceColumn.isNullable(), sourceColumnRef);
+            projectedColumns.add(column);
+        }
+        return PTableImpl.makePTable(table.getTenantId(), PROJECTED_TABLE_SCHEMA, table.getName(), PTableType.PROJECTED,
+                    null, table.getTimeStamp(), table.getSequenceNumber(), table.getPKName(),
+                    retainPKColumns ? table.getBucketNum() : null, projectedColumns, null,
+                    null, Collections.<PTable>emptyList(), table.isImmutableRows(), Collections.<PName>emptyList(), null, null,
+                    table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(),
+                    null);
+    }
+
+    // For extracting column references from a single SELECT statement
+    private static class ColumnRefVisitor extends StatelessTraverseAllParseNodeVisitor {
+        private final StatementContext context;
+        private final Set<ColumnRef> columnRefSet;
+        private final Set<LocalIndexDataColumnRef> localIndexColumnRefSet;
+        
+        private ColumnRefVisitor(StatementContext context) {
+            this.context = context;
+            this.columnRefSet = new HashSet<ColumnRef>();
+            this.localIndexColumnRefSet = new HashSet<LocalIndexDataColumnRef>();
+        }
+
+        @Override
+        public Void visit(ColumnParseNode node) throws SQLException {
+            try {
+                columnRefSet.add(context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()));
+            } catch (ColumnNotFoundException e) {
+                if (context.getCurrentTable().getTable().getIndexType() == IndexType.LOCAL) {
+                    try {
+                        localIndexColumnRefSet.add(new LocalIndexDataColumnRef(context, node.getName()));
+                    } catch (ColumnFamilyNotFoundException c) {
+                        throw e;
+                    }
+                } else {
+                    throw e;
+                }
+            }
+            return null;
+        }        
+    }
+}

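The new TupleProjectionCompiler prunes the projected table by first collecting every column the statement references; ColumnRefVisitor is a traverse-all visitor that records those references. The same idea over a toy parse tree, with all class names here being illustrative rather than Phoenix's:

    import java.util.*;

    public class ColumnCollector {
        interface Node { void accept(Visitor v); }
        interface Visitor { void visitColumn(Column c); }

        static final class Column implements Node {
            final String name;
            Column(String name) { this.name = name; }
            public void accept(Visitor v) { v.visitColumn(this); }
        }

        static final class Op implements Node { // any compound expression
            final List<Node> children;
            Op(Node... children) { this.children = Arrays.asList(children); }
            public void accept(Visitor v) {
                for (Node child : children) child.accept(v); // traverse all
            }
        }

        public static void main(String[] args) {
            // SELECT a + b ... WHERE c > 0 touches columns a, b, c.
            Node select = new Op(new Op(new Column("a"), new Column("b")), new Column("c"));
            Set<String> referenced = new LinkedHashSet<>();
            select.accept(c -> referenced.add(c.name));
            System.out.println(referenced); // [a, b, c] -> only these survive projection
        }
    }
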
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index b21cc2f..8a76564 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -419,11 +419,11 @@ public class UpsertCompiler {
                     // Pass scan through if same table in upsert and select so that projection is computed correctly
                     // Use optimizer to choose the best plan
                     try {
-                        QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns, parallelIteratorFactoryToBe, new SequenceManager(statement));
+                        QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns, parallelIteratorFactoryToBe, new SequenceManager(statement), false);
                         queryPlanToBe = compiler.compile();
                         // This is post-fix: if the tableRef is a projected table, this means there are post-processing 
                         // steps and parallelIteratorFactory did not take effect.
-                        if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.JOIN || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) {
+                        if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) {
                             parallelIteratorFactoryToBe = null;
                         }
                     } catch (MetaDataEntityNotFoundException e) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
index 406b567..9631850 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
@@ -145,7 +145,7 @@ public class WhereCompiler {
             expression = AndExpression.create(filters);
         }
         
-        if (context.getCurrentTable().getTable().getType() != PTableType.JOIN && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY) {
+        if (context.getCurrentTable().getTable().getType() != PTableType.PROJECTED && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY) {
             expression = WhereOptimizer.pushKeyExpressionsToScan(context, statement, expression, extractedNodes);
         }
         setScanFilter(context, statement, expression, whereCompiler.disambiguateWithFamily, hashJoinOptimization);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 69cdcb6..25ac408 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -47,6 +48,8 @@ import org.apache.phoenix.schema.KeyValueSchema;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.tuple.ResultTuple;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
@@ -216,9 +219,10 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             final RegionScanner s, final int offset, final Scan scan,
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
             final HRegion dataRegion, final IndexMaintainer indexMaintainer,
-            final byte[][] viewConstants, final ImmutableBytesWritable ptr) {
+            final byte[][] viewConstants, final TupleProjector projector,
+            final ImmutableBytesWritable ptr) {
         return getWrappedScanner(c, s, null, null, offset, scan, dataColumns, tupleProjector,
-                dataRegion, indexMaintainer, viewConstants, null, null, ptr);
+                dataRegion, indexMaintainer, viewConstants, null, null, projector, ptr);
     }
     
     /**
@@ -241,7 +245,8 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
             final HRegion dataRegion, final IndexMaintainer indexMaintainer,
             final byte[][] viewConstants, final KeyValueSchema kvSchema, 
-            final ValueBitSet kvSchemaBitSet, final ImmutableBytesWritable ptr) {
+            final ValueBitSet kvSchemaBitSet, final TupleProjector projector,
+            final ImmutableBytesWritable ptr) {
         return new RegionScanner() {
 
             @Override
@@ -303,6 +308,11 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                         IndexUtil.wrapResultUsingOffset(c, result, offset, dataColumns,
                             tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
                     }
+                    if (projector != null) {
+                        Tuple tuple = projector.projectResults(new ResultTuple(Result.create(result)));
+                        result.clear();
+                        result.add(tuple.getValue(0));
+                    }
                     // There is a scanattribute set to retrieve the specific array element
                     return next;
                 } catch (Throwable t) {
@@ -325,6 +335,11 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                         IndexUtil.wrapResultUsingOffset(c, result, offset, dataColumns,
                             tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
                     }
+                    if (projector != null) {
+                        Tuple tuple = projector.projectResults(new ResultTuple(Result.create(result)));
+                        result.clear();
+                        result.add(tuple.getValue(0));
+                    }
                     // There is a scanattribute set to retrieve the specific array element
                     return next;
                 } catch (Throwable t) {

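The getWrappedScanner() changes apply an optional projector after each next()/nextRaw() call: the raw cells are projected into a single value and the result list is rewritten in place. A stripped-down sketch of that decorator shape, using plain Java collections in place of HBase's RegionScanner and Cell types:

    import java.util.*;
    import java.util.function.Function;

    public class ProjectingScanner {
        interface RowScanner { boolean next(List<String> result); }

        // Decorates a scanner: when a projector is supplied, each row is
        // replaced by its single projected value, mirroring the
        // result.clear()/result.add(...) sequence in the diff above.
        static RowScanner wrap(RowScanner inner, Function<List<String>, String> projector) {
            if (projector == null) return inner; // no projection requested
            return result -> {
                boolean hasMore = inner.next(result);
                if (!result.isEmpty()) {
                    String projected = projector.apply(result);
                    result.clear();
                    result.add(projected);
                }
                return hasMore;
            };
        }

        public static void main(String[] args) {
            Iterator<List<String>> rows = List.of(
                    List.of("a1", "b1"), List.of("a2", "b2")).iterator();
            RowScanner base = result -> {
                result.clear();
                if (rows.hasNext()) result.addAll(rows.next());
                return rows.hasNext();
            };
            RowScanner wrapped = wrap(base, cells -> String.join("|", cells));
            List<String> buf = new ArrayList<>();
            boolean more;
            do {
                more = wrapped.next(buf);
                if (!buf.isEmpty()) System.out.println(buf); // [a1|b1] then [a2|b2]
            } while (more);
        }
    }
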
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 0984b06..1f1ba36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -131,7 +131,10 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         HRegion dataRegion = null;
         byte[][] viewConstants = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
-        if (ScanUtil.isLocalIndex(scan)) {
+
+        final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
+        final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
+        if (ScanUtil.isLocalIndex(scan) || (j == null && p != null)) {
             if (dataColumns != null) {
                 tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
                 dataRegion = IndexUtil.getDataRegion(c.getEnvironment());
@@ -140,12 +143,10 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
             innerScanner =
                     getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, 
-                            dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, tempPtr);
+                            dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
         } 
 
-        final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
-        final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
-        if (p != null || j != null) {
+        if (j != null) {
             innerScanner =
                     new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan),
                             c.getEnvironment());

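The reordering in this observer encodes a dispatch rule that repeats in ScanRegionObserver and UngroupedAggregateRegionObserver below: when a hash join is present, the join scanner owns the projector; otherwise the wrapped scanner does, so the projection is applied exactly once. The rule in isolation (Projector, JoinInfo, and the scanner classes are stand-ins, not Phoenix types):

    public class ScannerAssembly {
        static class Projector {}
        static class JoinInfo {}
        interface Scanner {}

        static class WrappedScanner implements Scanner {
            final Scanner inner; final Projector projector; // applied per row when non-null
            WrappedScanner(Scanner inner, Projector projector) {
                this.inner = inner; this.projector = projector;
            }
        }

        static class HashJoinScanner implements Scanner {
            final Scanner inner; final Projector projector; final JoinInfo joinInfo;
            HashJoinScanner(Scanner inner, Projector projector, JoinInfo joinInfo) {
                this.inner = inner; this.projector = projector; this.joinInfo = joinInfo;
            }
        }

        // Mirrors the coprocessor logic: exactly one layer owns the projector.
        static Scanner assemble(Scanner inner, Projector p, JoinInfo j, boolean localIndexScan) {
            if (localIndexScan || (j == null && p != null)) {
                inner = new WrappedScanner(inner, j == null ? p : null);
            }
            if (j != null) {
                inner = new HashJoinScanner(inner, p, j);
            }
            return inner;
        }
    }
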
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index 176520e..cdfc771 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -70,39 +70,37 @@ public class HashJoinRegionScanner implements RegionScanner {
         this.hasMore = true;
         this.count = 0;
         this.limit = Long.MAX_VALUE;
-        if (joinInfo != null) {
-            for (JoinType type : joinInfo.getJoinTypes()) {
-                if (type != JoinType.Inner && type != JoinType.Left && type != JoinType.Semi && type != JoinType.Anti)
-                    throw new DoNotRetryIOException("Got join type '" + type + "'. Expect only INNER or LEFT with hash-joins.");
-            }
-            if (joinInfo.getLimit() != null) {
-                this.limit = joinInfo.getLimit();
-            }
-            int count = joinInfo.getJoinIds().length;
-            this.tempTuples = new List[count];
-            this.hashCaches = new HashCache[count];
-            this.tempSrcBitSet = new ValueBitSet[count];
-            TenantCache cache = GlobalCache.getTenantCache(env, tenantId);
-            for (int i = 0; i < count; i++) {
-                ImmutableBytesPtr joinId = joinInfo.getJoinIds()[i];
-                if (joinId.getLength() == 0) { // semi-join optimized into skip-scan
-                    hashCaches[i] = null;
-                    tempSrcBitSet[i] = null;
-                    tempTuples[i] = null;
-                    continue;
-                }
-                HashCache hashCache = (HashCache)cache.getServerCache(joinId);
-                if (hashCache == null)
-                    throw new DoNotRetryIOException("Could not find hash cache for joinId: " 
-                            + Bytes.toString(joinId.get(), joinId.getOffset(), joinId.getLength()) 
-                            + ". The cache might have expired and have been removed.");
-                hashCaches[i] = hashCache;
-                tempSrcBitSet[i] = ValueBitSet.newInstance(joinInfo.getSchemas()[i]);
-            }
-            if (this.projector != null) {
-                this.tempDestBitSet = ValueBitSet.newInstance(joinInfo.getJoinedSchema());
-                this.projector.setValueBitSet(tempDestBitSet);
+        for (JoinType type : joinInfo.getJoinTypes()) {
+            if (type != JoinType.Inner && type != JoinType.Left && type != JoinType.Semi && type != JoinType.Anti)
+                throw new DoNotRetryIOException("Got join type '" + type + "'. Expect only INNER or LEFT with hash-joins.");
+        }
+        if (joinInfo.getLimit() != null) {
+            this.limit = joinInfo.getLimit();
+        }
+        int count = joinInfo.getJoinIds().length;
+        this.tempTuples = new List[count];
+        this.hashCaches = new HashCache[count];
+        this.tempSrcBitSet = new ValueBitSet[count];
+        TenantCache cache = GlobalCache.getTenantCache(env, tenantId);
+        for (int i = 0; i < count; i++) {
+            ImmutableBytesPtr joinId = joinInfo.getJoinIds()[i];
+            if (joinId.getLength() == 0) { // semi-join optimized into skip-scan
+                hashCaches[i] = null;
+                tempSrcBitSet[i] = null;
+                tempTuples[i] = null;
+                continue;
             }
+            HashCache hashCache = (HashCache)cache.getServerCache(joinId);
+            if (hashCache == null)
+                throw new DoNotRetryIOException("Could not find hash cache for joinId: " 
+                        + Bytes.toString(joinId.get(), joinId.getOffset(), joinId.getLength()) 
+                        + ". The cache might have expired and have been removed.");
+            hashCaches[i] = hashCache;
+            tempSrcBitSet[i] = ValueBitSet.newInstance(joinInfo.getSchemas()[i]);
+        }
+        if (this.projector != null) {
+            this.tempDestBitSet = ValueBitSet.newInstance(joinInfo.getJoinedSchema());
+            this.projector.setValueBitSet(tempDestBitSet);
         }
     }
     
@@ -111,13 +109,11 @@ public class HashJoinRegionScanner implements RegionScanner {
             return;
         
         Tuple tuple = new ResultTuple(Result.create(result));
-        if (joinInfo == null || joinInfo.forceProjection()) {
+        // For backward compatibility only: in new versions,
+        // HashJoinInfo.forceProjection() always returns true.
+        if (joinInfo.forceProjection()) {
             tuple = projector.projectResults(tuple);
         }
-        if (joinInfo == null) {
-            resultQueue.offer(tuple);
-            return;
-        }
         
         if (hasBatchLimit)
             throw new UnsupportedOperationException("Cannot support join operations in scans with limit");
@@ -147,7 +143,7 @@ public class HashJoinRegionScanner implements RegionScanner {
                 }
             } else {
                 KeyValueSchema schema = joinInfo.getJoinedSchema();
-                if (!joinInfo.forceProjection()) {
+                if (!joinInfo.forceProjection()) { // backward compatibility
                     tuple = projector.projectResults(tuple);
                 }
                 resultQueue.offer(tuple);

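Now that joinInfo can no longer be null here, the constructor unconditionally resolves one server-side hash cache per join id, treating a zero-length id as a semi-join already optimized into a skip scan. A compact sketch of that resolution step, with plain maps standing in for TenantCache and HashCache:

    import java.io.IOException;
    import java.util.*;

    public class HashCacheProbe {
        // Stand-in for Phoenix's TenantCache: joinId -> materialized build side.
        static final Map<String, Map<String, List<String>>> SERVER_CACHE = new HashMap<>();

        static List<Map<String, List<String>>> resolveCaches(String[] joinIds) throws IOException {
            List<Map<String, List<String>>> caches = new ArrayList<>(joinIds.length);
            for (String joinId : joinIds) {
                if (joinId.isEmpty()) {      // semi-join optimized into skip-scan
                    caches.add(null);
                    continue;
                }
                Map<String, List<String>> cache = SERVER_CACHE.get(joinId);
                if (cache == null) {
                    throw new IOException("Could not find hash cache for joinId: " + joinId
                            + ". The cache might have expired and have been removed.");
                }
                caches.add(cache);
            }
            return caches;
        }

        public static void main(String[] args) throws IOException {
            SERVER_CACHE.put("j1", Map.of("k1", List.of("rhsRow1")));
            // One real join plus one skip-scan-optimized semi-join.
            System.out.println(resolveCaches(new String[] { "j1", "" }));
        }
    }
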
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 9270495..ddde407 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -199,15 +199,16 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             indexMaintainer = indexMaintainers.get(0);
             viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
         }
+        
+        final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
+        final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
         innerScanner =
                 getWrappedScanner(c, innerScanner, arrayKVRefs, arrayFuncRefs, offset, scan,
                     dataColumns, tupleProjector, dataRegion, indexMaintainer, viewConstants,
-                    kvSchema, kvSchemaBitSet, ptr);
+                    kvSchema, kvSchemaBitSet, j == null ? p : null, ptr);
 
-        final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
-        final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
         final ImmutableBytesWritable tenantId = ScanUtil.getTenantId(scan);
-        if (p != null || j != null) {
+        if (j != null) {
             innerScanner = new HashJoinRegionScanner(innerScanner, p, j, tenantId, c.getEnvironment());
         }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 71c4dc6..72a0a64 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -214,7 +214,9 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         byte[][] viewConstants = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
         boolean localIndexScan = ScanUtil.isLocalIndex(scan);
-        if (localIndexScan && !isDelete) {
+        final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
+        final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
+        if ((localIndexScan && !isDelete) || (j == null && p != null)) {
             if (dataColumns != null) {
                 tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
                 dataRegion = IndexUtil.getDataRegion(c.getEnvironment());
@@ -223,12 +225,10 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
             theScanner =
                     getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, 
-                            dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, tempPtr);
+                            dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
         } 
         
-        final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
-        final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
-        if (p != null || j != null)  {
+        if (j != null)  {
             theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), c.getEnvironment());
         }
         

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
index 77682e4..a4728e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
@@ -32,19 +32,23 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.compile.ColumnProjector;
-import org.apache.phoenix.compile.JoinCompiler.ProjectedPTableWrapper;
 import org.apache.phoenix.compile.RowProjector;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.ExpressionType;
 import org.apache.phoenix.schema.KeyValueSchema;
 import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
 import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.BaseTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
+import com.google.common.base.Preconditions;
+
 public class TupleProjector {    
     public static final byte[] VALUE_COLUMN_FAMILY = Bytes.toBytes("_v");
     public static final byte[] VALUE_COLUMN_QUALIFIER = new byte[0];
@@ -70,16 +74,16 @@ public class TupleProjector {
         valueSet = ValueBitSet.newInstance(schema);
     }
     
-    public TupleProjector(ProjectedPTableWrapper projected) {
-    	List<PColumn> columns = projected.getTable().getColumns();
-    	expressions = new Expression[columns.size() - projected.getTable().getPKColumns().size()];
-    	// we do not count minNullableIndex for we might do later merge.
+    public TupleProjector(PTable projectedTable) {
+        Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED);
+    	List<PColumn> columns = projectedTable.getColumns();
+    	this.expressions = new Expression[columns.size() - projectedTable.getPKColumns().size()];
     	KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
     	int i = 0;
-        for (PColumn column : projected.getTable().getColumns()) {
+        for (PColumn column : columns) {
         	if (!SchemaUtil.isPKColumn(column)) {
         		builder.addField(column);
-        		expressions[i++] = projected.getSourceExpression(column);
+        		expressions[i++] = ((ProjectedColumn) column).getSourceColumnRef().newColumnExpression();
         	}
         }
         schema = builder.build();

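The reworked TupleProjector constructor walks the projected table's columns, skips PK columns (those travel in the row key), and derives one value expression per remaining column from its source column reference. A simplified sketch, assuming each projected column just remembers an index into the source row:

    import java.util.*;
    import java.util.function.Function;

    public class MiniTupleProjector {
        static final class ProjectedColumn {
            final String name; final boolean pk; final int sourceIndex;
            ProjectedColumn(String name, boolean pk, int sourceIndex) {
                this.name = name; this.pk = pk; this.sourceIndex = sourceIndex;
            }
        }

        private final List<Function<String[], String>> expressions = new ArrayList<>();

        MiniTupleProjector(List<ProjectedColumn> columns) {
            for (ProjectedColumn col : columns) {
                if (col.pk) continue;            // PK columns stay in the row key
                int idx = col.sourceIndex;       // source column ref -> value expression
                expressions.add(row -> row[idx]);
            }
        }

        String[] project(String[] rawRow) {
            String[] out = new String[expressions.size()];
            for (int i = 0; i < out.length; i++) out[i] = expressions.get(i).apply(rawRow);
            return out;
        }

        public static void main(String[] args) {
            MiniTupleProjector p = new MiniTupleProjector(List.of(
                    new ProjectedColumn("ID", true, 0),
                    new ProjectedColumn("V2", false, 2),
                    new ProjectedColumn("V1", false, 1)));
            System.out.println(Arrays.toString(p.project(new String[] { "k", "v1", "v2" })));
            // -> [v2, v1]
        }
    }
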
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java b/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java
index ad96061..ea78671 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java
@@ -50,10 +50,9 @@ public class HashJoinInfo {
     private int[] fieldPositions;
     private Expression postJoinFilterExpression;
     private Integer limit;
-    private boolean forceProjection;
 
-    public HashJoinInfo(PTable joinedTable, ImmutableBytesPtr[] joinIds, List<Expression>[] joinExpressions, JoinType[] joinTypes, boolean[] earlyEvaluation, PTable[] tables, int[] fieldPositions, Expression postJoinFilterExpression, Integer limit, boolean forceProjection) {
-    	this(buildSchema(joinedTable), joinIds, joinExpressions, joinTypes, earlyEvaluation, buildSchemas(tables), fieldPositions, postJoinFilterExpression, limit, forceProjection);
+    public HashJoinInfo(PTable joinedTable, ImmutableBytesPtr[] joinIds, List<Expression>[] joinExpressions, JoinType[] joinTypes, boolean[] earlyEvaluation, PTable[] tables, int[] fieldPositions, Expression postJoinFilterExpression, Integer limit) {
+    	this(buildSchema(joinedTable), joinIds, joinExpressions, joinTypes, earlyEvaluation, buildSchemas(tables), fieldPositions, postJoinFilterExpression, limit);
     }
 
     private static KeyValueSchema[] buildSchemas(PTable[] tables) {
@@ -76,7 +75,7 @@ public class HashJoinInfo {
         return builder.build();
     }
 
-    private HashJoinInfo(KeyValueSchema joinedSchema, ImmutableBytesPtr[] joinIds, List<Expression>[] joinExpressions, JoinType[] joinTypes, boolean[] earlyEvaluation, KeyValueSchema[] schemas, int[] fieldPositions, Expression postJoinFilterExpression, Integer limit, boolean forceProjection) {
+    private HashJoinInfo(KeyValueSchema joinedSchema, ImmutableBytesPtr[] joinIds, List<Expression>[] joinExpressions, JoinType[] joinTypes, boolean[] earlyEvaluation, KeyValueSchema[] schemas, int[] fieldPositions, Expression postJoinFilterExpression, Integer limit) {
     	this.joinedSchema = joinedSchema;
     	this.joinIds = joinIds;
         this.joinExpressions = joinExpressions;
@@ -86,7 +85,6 @@ public class HashJoinInfo {
         this.fieldPositions = fieldPositions;
         this.postJoinFilterExpression = postJoinFilterExpression;
         this.limit = limit;
-        this.forceProjection = forceProjection;
     }
 
     public KeyValueSchema getJoinedSchema() {
@@ -124,15 +122,11 @@ public class HashJoinInfo {
     public Integer getLimit() {
         return limit;
     }
-
-    /*
-     * If the LHS table is a sub-select, we always do projection, since
-     * the ON expressions reference only projected columns.
-     */
+    
     public boolean forceProjection() {
-        return forceProjection;
+        return true;
     }
-
+ 
     public static void serializeHashJoinIntoScan(Scan scan, HashJoinInfo joinInfo) {
         ByteArrayOutputStream stream = new ByteArrayOutputStream();
         try {
@@ -159,7 +153,7 @@ public class HashJoinInfo {
                 WritableUtils.writeVInt(output, -1);
             }
             WritableUtils.writeVInt(output, joinInfo.limit == null ? -1 : joinInfo.limit);
-            output.writeBoolean(joinInfo.forceProjection);
+            output.writeBoolean(true);
             scan.setAttribute(HASH_JOIN, stream.toByteArray());
         } catch (IOException e) {
             throw new RuntimeException(e);
@@ -216,17 +210,16 @@ public class HashJoinInfo {
                 postJoinFilterExpression.readFields(input);
             }
             int limit = -1;
-            boolean forceProjection = false;
             // Read these and ignore if we don't find them as they were not
             // present in Apache Phoenix 3.0.0 release. This allows a newer
             // 3.1 server to work with an older 3.0 client without forcing
             // both to be upgraded in lock step.
             try {
                 limit = WritableUtils.readVInt(input);
-                forceProjection = input.readBoolean();
+                input.readBoolean(); // retired forceProjection flag, discarded in new versions
             } catch (EOFException ignore) {
             }
-            return new HashJoinInfo(joinedSchema, joinIds, joinExpressions, joinTypes, earlyEvaluation, schemas, fieldPositions, postJoinFilterExpression, limit >= 0 ? limit : null, forceProjection);
+            return new HashJoinInfo(joinedSchema, joinIds, joinExpressions, joinTypes, earlyEvaluation, schemas, fieldPositions, postJoinFilterExpression, limit >= 0 ? limit : null);
         } catch (IOException e) {
             throw new RuntimeException(e);
         } finally {

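Two wire-compatibility tricks are visible in HashJoinInfo: the retired forceProjection flag is still written (always true) so older readers keep working, and optional trailing fields are read inside a try/catch on EOFException so a newer server tolerates payloads from older clients. A minimal sketch of that tolerant read side; the field layout here is invented for illustration:

    import java.io.*;

    public class OptionalTrailingFields {
        // Older writers stop after the required fields; newer ones append more.
        static int readLimit(byte[] payload) throws IOException {
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));
            int required = in.readInt();   // always present
            int limit = -1;                // default when the trailing field is absent
            try {
                limit = in.readInt();      // optional trailing field
                in.readBoolean();          // retired flag: read and discard
            } catch (EOFException ignore) {
                // Old-format payload: keep the defaults rather than failing.
            }
            return limit >= 0 ? limit : required; // hypothetical use of the value
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream oldFmt = new ByteArrayOutputStream();
            new DataOutputStream(oldFmt).writeInt(10);
            System.out.println(readLimit(oldFmt.toByteArray())); // 10: defaults used

            ByteArrayOutputStream newFmt = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(newFmt);
            out.writeInt(10); out.writeInt(5); out.writeBoolean(true);
            System.out.println(readLimit(newFmt.toByteArray())); // 5: trailing fields read
        }
    }
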
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index a51723b..382bba5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -113,14 +113,13 @@ public class QueryOptimizer {
         SelectStatement select = (SelectStatement)dataPlan.getStatement();
         // Exit early if we have a point lookup as we can't get better than that
         if (!useIndexes 
-                || select.isJoin() 
-                || dataPlan.getContext().getResolver().getTables().size() > 1
-                || select.getInnerSelectStatement() != null
                 || (dataPlan.getContext().getScanRanges().isPointLookup() && stopAtBestPlan)) {
             return Collections.singletonList(dataPlan);
         }
-        PTable dataTable = dataPlan.getTableRef().getTable();
-        List<PTable>indexes = Lists.newArrayList(dataTable.getIndexes());
+        // For single-query tuple projection, the projected table inherits the indexes of the
+        // original table; for join queries it does not. So passing the projected table here is
+        // enough to tell whether this plan comes from a single query or from part of a join query.
+        List<PTable>indexes = Lists.newArrayList(dataPlan.getContext().getResolver().getTables().get(0).getTable().getIndexes());
         if (indexes.isEmpty() || dataPlan.isDegenerate() || dataPlan.getTableRef().hasDynamicCols() || select.getHint().hasHint(Hint.NO_INDEX)) {
             return Collections.singletonList(dataPlan);
         }
@@ -138,7 +137,7 @@ public class QueryOptimizer {
             targetColumns = targetDatums;
         }
         
-        SelectStatement translatedIndexSelect = IndexStatementRewriter.translate(select, dataPlan.getContext().getResolver());
+        SelectStatement translatedIndexSelect = IndexStatementRewriter.translate(select, FromCompiler.getResolver(dataPlan.getTableRef()));
         List<QueryPlan> plans = Lists.newArrayListWithExpectedSize(1 + indexes.size());
         plans.add(dataPlan);
         QueryPlan hintedPlan = getHintedQueryPlan(statement, translatedIndexSelect, indexes, targetColumns, parallelIteratorFactory, plans);
@@ -230,12 +229,14 @@ public class QueryOptimizer {
         TableNode table = FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName));
         SelectStatement indexSelect = FACTORY.select(select, table);
         ColumnResolver resolver = FromCompiler.getResolverForQuery(indexSelect, statement.getConnection());
+        // Do tuple projection for the index plan only if the data plan did.
+        boolean isProjected = dataPlan.getContext().getResolver().getTables().get(0).getTable().getType() == PTableType.PROJECTED;
         // Check index state of now potentially updated index table to make sure it's active
         if (PIndexState.ACTIVE.equals(resolver.getTables().get(0).getTable().getIndexState())) {
             try {
             	// translate nodes that match expressions that are indexed to the associated column parse node
                 indexSelect = ParseNodeRewriter.rewrite(indexSelect, new  IndexExpressionParseNodeRewriter(index, statement.getConnection()));
-                QueryCompiler compiler = new QueryCompiler(statement, indexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager());
+                QueryCompiler compiler = new QueryCompiler(statement, indexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected);
                 
                 QueryPlan plan = compiler.compile();
                 // If query doesn't have where clause and some of columns to project are missing
@@ -267,7 +268,7 @@ public class QueryOptimizer {
                 ParseNode where = dataSelect.getWhere();
                 if (isHinted && where != null) {
                     StatementContext context = new StatementContext(statement, resolver);
-                    WhereConditionRewriter whereRewriter = new WhereConditionRewriter(dataPlan.getContext().getResolver(), context);
+                    WhereConditionRewriter whereRewriter = new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context);
                     where = where.accept(whereRewriter);
                     if (where != null) {
                         PTable dataTable = dataPlan.getTableRef().getTable();
@@ -301,7 +302,7 @@ public class QueryOptimizer {
                         query = SubqueryRewriter.transform(query, queryResolver, statement.getConnection());
                         queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
                         query = StatementNormalizer.normalize(query, queryResolver);
-                        QueryPlan plan = new QueryCompiler(statement, query, queryResolver).compile();
+                        QueryPlan plan = new QueryCompiler(statement, query, queryResolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected).compile();
                         return plan;
                     }
                 }

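Despite these changes, the optimizer keeps its overall shape: start from the data plan, compile one candidate plan per applicable index, and rank the candidates. A toy rendering of that loop; the Plan class and the cost model here are purely illustrative, and Phoenix's real ranking is considerably more involved:

    import java.util.*;

    public class PlanChooser {
        static final class Plan {
            final String table; final long estimatedCost;
            Plan(String table, long estimatedCost) {
                this.table = table; this.estimatedCost = estimatedCost;
            }
        }

        static Plan optimize(Plan dataPlan, List<String> indexes, boolean useIndexes) {
            if (!useIndexes || indexes.isEmpty()) {
                return dataPlan; // exit early, as the real code does for point lookups
            }
            List<Plan> plans = new ArrayList<>();
            plans.add(dataPlan);
            for (String index : indexes) {
                // In Phoenix this is a full QueryCompiler.compile() per index table.
                plans.add(new Plan(index, dataPlan.estimatedCost / 2));
            }
            plans.sort(Comparator.comparingLong(p -> p.estimatedCost));
            return plans.get(0);
        }

        public static void main(String[] args) {
            Plan best = optimize(new Plan("T", 100), List.of("IDX_T_V1"), true);
            System.out.println(best.table); // IDX_T_V1
        }
    }
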
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
index c6dd1f4..76f6218 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
@@ -105,7 +105,7 @@ public class ColumnRef {
                     displayName);
         }
         
-        if (table.getType() == PTableType.JOIN || table.getType() == PTableType.SUBQUERY) {
+        if (table.getType() == PTableType.PROJECTED || table.getType() == PTableType.SUBQUERY) {
         	return new ProjectedColumnExpression(column, table, displayName);
         }
        

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
index 62ef431..270c66d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
@@ -20,16 +20,13 @@ package org.apache.phoenix.schema;
 import java.sql.SQLException;
 import java.util.Set;
 
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.FromCompiler;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.expression.ColumnExpression;
 import org.apache.phoenix.expression.ProjectedColumnExpression;
 import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.parse.TableName;
-import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.SchemaUtil;
 
 public class LocalIndexDataColumnRef extends ColumnRef {
     final private int position;
@@ -62,11 +59,7 @@ public class LocalIndexDataColumnRef extends ColumnRef {
 
     @Override
     public ColumnExpression newColumnExpression(boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
-        PTable table = this.getTable();
-        PColumn column = this.getColumn();
-        // TODO: util for this or store in member variable
-        byte[] defaultFamily = table.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : table.getDefaultFamilyName().getBytes();
-        String displayName = SchemaUtil.getColumnDisplayName(Bytes.compareTo(defaultFamily, column.getFamilyName().getBytes()) == 0  ? null : column.getFamilyName().getBytes(), column.getName().getBytes());
+        String displayName = this.getTableRef().getColumnDisplayName(this, schemaNameCaseSensitive, colNameCaseSensitive);
         return new ProjectedColumnExpression(this.getColumn(), columns, position, displayName);
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 831616b..e133433 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -764,7 +764,7 @@ public class MetaDataClient {
                 String tableName = getFullTableName(dataTableRef);
                 String query = "SELECT count(*) FROM " + tableName;
                 final QueryPlan plan = statement.compileQuery(query);
-                TableRef tableRef = plan.getContext().getResolver().getTables().get(0);
+                TableRef tableRef = plan.getTableRef();
                 // Set attribute on scan that UngroupedAggregateRegionObserver will switch on.
                 // We'll detect that this attribute was set the server-side and write the index
                 // rows per region as a result. The value of the attribute will be our persisted

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableType.java
index 23ba829..019c0e1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableType.java
@@ -27,7 +27,7 @@ public enum PTableType {
     TABLE("u", "TABLE"),
     VIEW("v", "VIEW"),
     INDEX("i", "INDEX"),
-    JOIN("j", "JOIN"),
+    PROJECTED("p", "PROJECTED"),
     SUBQUERY("q", "SUBQUERY"); 
 
     private final PName value;

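PTableType pairs each constant with a one-letter serialized value, which is why renaming JOIN to PROJECTED also changes the stored code from "j" to "p". A sketch of that enum pattern with a reverse lookup; the fromSerializedValue helper is illustrative, not Phoenix's exact API:

    import java.util.HashMap;
    import java.util.Map;

    public enum TableKind {
        TABLE("u"), VIEW("v"), INDEX("i"), PROJECTED("p"), SUBQUERY("q");

        private static final Map<String, TableKind> BY_VALUE = new HashMap<>();
        static {
            for (TableKind kind : values()) BY_VALUE.put(kind.serializedValue, kind);
        }

        private final String serializedValue;
        TableKind(String serializedValue) { this.serializedValue = serializedValue; }

        public String getSerializedValue() { return serializedValue; }

        // Reverse lookup used when reading the code back from the catalog.
        public static TableKind fromSerializedValue(String value) {
            TableKind kind = BY_VALUE.get(value);
            if (kind == null) throw new IllegalArgumentException("Unknown value: " + value);
            return kind;
        }
    }
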
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java
new file mode 100644
index 0000000..19dd1c1
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+public class ProjectedColumn extends DelegateColumn {
+    
+    private final PName name;
+    private final PName familyName;
+    private final int position;
+    private final boolean nullable;
+    private final ColumnRef sourceColumnRef;
+
+    public ProjectedColumn(PName name, PName familyName, int position, boolean nullable, ColumnRef sourceColumnRef) {
+        super(sourceColumnRef.getColumn());
+        this.name = name;
+        this.familyName = familyName;
+        this.position = position;
+        this.nullable = nullable;
+        this.sourceColumnRef = sourceColumnRef;
+    }
+    
+    @Override
+    public PName getName() {
+        return name;
+    }
+    
+    public PName getFamilyName() {
+        return familyName;
+    }
+    
+    @Override
+    public int getPosition() {
+        return position;
+    }
+    
+    @Override
+    public boolean isNullable() {
+        return nullable;
+    }
+
+    public ColumnRef getSourceColumnRef() {
+        return sourceColumnRef;
+    }
+}
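
ProjectedColumn is a pure delegate: it overrides only the identity attributes (name, family, position, nullability) and defers everything else to the column behind sourceColumnRef. A minimal construction sketch follows, with hypothetical names, assuming a TableRef for the source table and the (TableRef, position) ColumnRef constructor used elsewhere in this codebase:

    // Expose source column 0 of tableRef under a new name at projected position 0.
    ColumnRef sourceRef = new ColumnRef(tableRef, 0);
    ProjectedColumn projected = new ProjectedColumn(
        PNameFactory.newName("A_RENAMED"),                           // projected name (hypothetical)
        PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY),  // projected family
        0,      // position within the projected table
        true,   // nullable, e.g. for the outer side of a join
        sourceRef);
    // getName()/getPosition()/isNullable() answer with the values above; the
    // data type, sort order, etc. still come from sourceRef.getColumn().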

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
index b64912b..bd88770 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.schema;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.compile.TupleProjectionCompiler;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -73,9 +74,10 @@ public class TableRef {
         String cq = null;       
         PColumn column = ref.getColumn();
         String name = column.getName().getString();
-        boolean isIndex = table.getType() == PTableType.INDEX;
-        if (table.getType() == PTableType.JOIN || table.getType() == PTableType.SUBQUERY) {
-            cq = column.getName().getString();
+        boolean isIndex = IndexUtil.isIndexColumn(name);
+        if ((table.getType() == PTableType.PROJECTED && TupleProjectionCompiler.PROJECTED_TABLE_SCHEMA.equals(table.getSchemaName()))
+                || table.getType() == PTableType.SUBQUERY) {
+            cq = name;
         }
         else if (SchemaUtil.isPKColumn(column)) {
             cq = isIndex ? IndexUtil.getDataColumnName(name) : name;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f829751/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
index 01f28ae..3a012fb 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
@@ -619,7 +619,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
                     pointRange(tenantId1),
                     pointRange(tenantId2),
                     pointRange(tenantId3))),
-                plan.getContext().getResolver().getTables().get(0).getTable().getRowKeySchema()),
+                plan.getTableRef().getTable().getRowKeySchema()),
             filter);
     }
 
@@ -642,7 +642,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
                     pointRange(tenantId1),
                     pointRange(tenantId2),
                     pointRange(tenantId3))),
-                plan.getContext().getResolver().getTables().get(0).getTable().getRowKeySchema()),
+                plan.getTableRef().getTable().getRowKeySchema()),
             filter);
 
         byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1);
@@ -705,7 +705,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
                         true,
                         Bytes.toBytes(entityId2),
                         true))),
-                plan.getContext().getResolver().getTables().get(0).getTable().getRowKeySchema()),
+                plan.getTableRef().getTable().getRowKeySchema()),
             filter);
     }
 


[20/50] [abbrv] phoenix git commit: PHOENIX-1646 Views and functional index expressions may lose information when stringified

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index 866365a..bf599ae 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.io.StringReader;
 import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
@@ -29,24 +30,43 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
 import org.apache.phoenix.schema.SortOrder;
 import org.junit.Test;
 
 
 public class QueryParserTest {
+    private void parseQuery(String sql) throws IOException, SQLException {
+        SQLParser parser = new SQLParser(new StringReader(sql));
+        BindableStatement stmt = null;
+        stmt = parser.parseStatement();
+        if (stmt.getOperation() != Operation.QUERY) {
+            return;
+        }
+        String newSQL = stmt.toString();
+        SQLParser newParser = new SQLParser(new StringReader(newSQL));
+        BindableStatement newStmt = null;
+        try {
+            newStmt = newParser.parseStatement();
+        } catch (SQLException e) {
+            fail("Unable to parse new:\n" + newSQL);
+        }
+        assertEquals("Expected equality:\n" + sql + "\n" + newSQL, stmt, newStmt);
+    }
+    
     @Test
     public void testParsePreQuery0() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select a from b\n" +
             "where ((ind.name = 'X')" +
             "and rownum <= (1000 + 1000))\n"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testParsePreQuery1() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" +
             "where( (ind.name = 'X'\n" +
             "and rownum <= 1 + 2)\n" +
@@ -54,12 +74,12 @@ public class QueryParserTest {
             "and (ind.key_prefix = '00T')\n" +
             "and (ind.name_type = 't'))"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testParsePreQuery2() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + 
             "where (ind.string_value in ('a', 'b', 'c', 'd'))\n" + 
             "and rownum <= ( 3 + 1 )\n" + 
@@ -68,12 +88,12 @@ public class QueryParserTest {
             "and (ind.deleted = '0')\n" + 
             "and (ind.index_num = 1)"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testParsePreQuery3() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + 
             "where (ind.number_value > 3)\n" + 
             "and rownum <= 1000\n" + 
@@ -82,54 +102,54 @@ public class QueryParserTest {
             "and (ind.deleted = '0'))\n" + 
             "and (ind.index_num = 2)"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testParsePreQuery4() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*+ index(t iecustom_entity_data_created) */ /*gatherSlowStats*/ count(1) from core.custom_entity_data t\n" + 
             "where (t.created_date > to_date('01/01/2001'))\n" + 
             "and rownum <= 4500\n" + 
             "and (t.organization_id = '000000000000000')\n" + 
             "and (t.key_prefix = '001')"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testCountDistinctQuery() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
                 "select count(distinct foo) from core.custom_entity_data t\n"
                         + "where (t.created_date > to_date('01/01/2001'))\n"
                         + "and (t.organization_id = '000000000000000')\n"
                         + "and (t.key_prefix = '001')\n" + "limit 4500"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testIsNullQuery() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select count(foo) from core.custom_entity_data t\n" + 
             "where (t.created_date is null)\n" + 
             "and (t.organization_id is not null)\n"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testAsInColumnAlias() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select count(foo) AS c from core.custom_entity_data t\n" + 
             "where (t.created_date is null)\n" + 
             "and (t.organization_id is not null)\n"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testParseJoin1() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*SOQL*/ \"Id\"\n" + 
             "from (select /*+ ordered index(cft) */\n" + 
             "cft.val188 \"Marketing_Offer_Code__c\",\n" + 
@@ -143,12 +163,12 @@ public class QueryParserTest {
             "and (t.account_id != '000000000000000'))\n" + 
             "where (\"Marketing_Offer_Code__c\" = 'FSCR')"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testParseJoin2() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*rptacctlist 00O40000002C3of*/ \"00N40000001M8VK\",\n" + 
             "\"00N40000001M8VK.ID\",\n" + 
             "\"00N30000000r0K2\",\n" + 
@@ -175,12 +195,12 @@ public class QueryParserTest {
             "AND (\"00N40000001M8VK\" is null or \"00N40000001M8VK\" in ('BRIAN IRWIN', 'BRIAN MILLER', 'COLLEEN HORNYAK', 'ERNIE ZAVORAL JR', 'JAMIE TRIMBUR', 'JOE ANTESBERGER', 'MICHAEL HYTLA', 'NATHAN DELSIGNORE', 'SANJAY GANDHI', 'TOM BASHIOUM'))\n" + 
             "AND (\"LAST_UPDATE\" >= to_date('2009-08-01 07:00:00'))"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
     
     @Test
     public void testNegative1() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ count(1) core.search_name_lookup ind\n" +
             "where (ind.name = 'X')\n" +
             "and rownum <= 2000\n" +
@@ -189,7 +209,7 @@ public class QueryParserTest {
             "and (ind.name_type = 't')"
             ));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode(), e.getErrorCode());
@@ -198,7 +218,7 @@ public class QueryParserTest {
 
     @Test
     public void testNegative2() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "seelect /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" +
             "where (ind.name = 'X')\n" +
             "and rownum <= 2000\n" +
@@ -207,7 +227,7 @@ public class QueryParserTest {
             "and (ind.name_type = 't')"
             ));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \"seelect\" at line 1, column 1."));
@@ -216,7 +236,7 @@ public class QueryParserTest {
 
     @Test
     public void testNegative3() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" +
             "where (ind.name = 'X')\n" +
             "and rownum <= 2000\n" +
@@ -225,7 +245,7 @@ public class QueryParserTest {
             "and (ind.name_type = 't'))"
             ));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 603 (42P00): Syntax error. Unexpected input. Expecting \"EOF\", got \")\" at line 6, column 26."));
@@ -234,7 +254,7 @@ public class QueryParserTest {
 
     @Test
     public void testNegativeCountDistinct() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ max( distinct 1) from core.search_name_lookup ind\n" +
             "where (ind.name = 'X')\n" +
             "and rownum <= 2000\n" +
@@ -243,7 +263,7 @@ public class QueryParserTest {
             "and (ind.name_type = 't')"
             ));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLFeatureNotSupportedException e) {
             // expected
@@ -252,7 +272,7 @@ public class QueryParserTest {
 
     @Test
     public void testNegativeCountStar() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ max(*) from core.search_name_lookup ind\n" +
             "where (ind.name = 'X')\n" +
             "and rownum <= 2000\n" +
@@ -261,7 +281,7 @@ public class QueryParserTest {
             "and (ind.name_type = 't')"
             ));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \"*\" at line 1, column 32."));
@@ -270,7 +290,7 @@ public class QueryParserTest {
 
     @Test
     public void testUnknownFunction() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ bogus_function(ind.key_prefix) from core.search_name_lookup ind\n" +
             "where (ind.name = 'X')\n" +
             "and rownum <= 2000\n" +
@@ -279,7 +299,7 @@ public class QueryParserTest {
             "and (ind.name_type = 't')"
             ));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.UNKNOWN_FUNCTION.getErrorCode(), e.getErrorCode());
@@ -288,12 +308,12 @@ public class QueryParserTest {
 
     @Test
     public void testNegativeNonBooleanWhere() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select /*gatherSlowStats*/ max( distinct 1) from core.search_name_lookup ind\n" +
             "where 1"
             ));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLFeatureNotSupportedException e) {
             // expected
@@ -302,48 +322,48 @@ public class QueryParserTest {
     
     @Test
     public void testCommentQuery() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select a from b -- here we come\n" +
             "where ((ind.name = 'X') // to save the day\n" +
             "and rownum /* won't run */ <= (1000 + 1000))\n"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testQuoteEscapeQuery() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select a from b\n" +
             "where ind.name = 'X''Y'\n"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testSubtractionInSelect() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
             "select a, 3-1-2, -4- -1-1 from b\n" +
             "where d = c - 1\n"
             ));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testParsingStatementWithMispellToken() throws Exception {
         try {
-            SQLParser parser = new SQLParser(new StringReader(
+            String sql = ((
                     "selects a from b\n" +
                     "where e = d\n"));
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught exception.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \"selects\" at line 1, column 1."));
         }
         try {
-            SQLParser parser = new SQLParser(new StringReader(
+            String sql = ((
                     "select a froms b\n" +
                     "where e = d\n"));
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught exception.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 602 (42P00): Syntax error. Missing \"FROM\" at line 1, column 16."));
@@ -353,19 +373,19 @@ public class QueryParserTest {
     @Test
     public void testParsingStatementWithExtraToken() throws Exception {
         try {
-            SQLParser parser = new SQLParser(new StringReader(
+            String sql = ((
                     "select a,, from b\n" +
                     "where e = d\n"));
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught exception.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \",\" at line 1, column 10."));
         }
         try {
-            SQLParser parser = new SQLParser(new StringReader(
+            String sql = ((
                     "select a from from b\n" +
                     "where e = d\n"));
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught exception.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \"from\" at line 1, column 15."));
@@ -375,10 +395,10 @@ public class QueryParserTest {
     @Test
     public void testParsingStatementWithMissingToken() throws Exception {
         try {
-            SQLParser parser = new SQLParser(new StringReader(
+            String sql = ((
                     "select a b\n" +
                     "where e = d\n"));
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught exception.");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode(), e.getErrorCode());
@@ -389,7 +409,7 @@ public class QueryParserTest {
     public void testParseCreateTableInlinePrimaryKeyWithOrder() throws Exception {
     	for (String order : new String[]{"asc", "desc"}) {
             String s = "create table core.entity_history_archive (id char(15) primary key ${o})".replace("${o}", order);
-    		CreateTableStatement stmt = (CreateTableStatement)new SQLParser(new StringReader(s)).parseStatement();
+    		CreateTableStatement stmt = (CreateTableStatement)new SQLParser((s)).parseStatement();
     		List<ColumnDef> columnDefs = stmt.getColumnDefs();
     		assertEquals(1, columnDefs.size());
     		assertEquals(SortOrder.fromDDLValue(order), columnDefs.iterator().next().getSortOrder()); 
@@ -401,7 +421,7 @@ public class QueryParserTest {
     	for (String order : new String[]{"asc", "desc"}) {
     		String stmt = "create table core.entity_history_archive (id varchar(20) ${o})".replace("${o}", order);
     		try {
-    			new SQLParser(new StringReader(stmt)).parseStatement();
+    			new SQLParser((stmt)).parseStatement();
     			fail("Expected parse exception to be thrown");
     		} catch (SQLException e) {
     			String errorMsg = "ERROR 603 (42P00): Syntax error. Unexpected input. Expecting \"RPAREN\", got \"${o}\"".replace("${o}", order);
@@ -414,7 +434,7 @@ public class QueryParserTest {
     public void testParseCreateTablePrimaryKeyConstraintWithOrder() throws Exception {
     	for (String order : new String[]{"asc", "desc"}) {
     		String s = "create table core.entity_history_archive (id CHAR(15), name VARCHAR(150), constraint pk primary key (id ${o}, name ${o}))".replace("${o}", order);
-    		CreateTableStatement stmt = (CreateTableStatement)new SQLParser(new StringReader(s)).parseStatement();
+    		CreateTableStatement stmt = (CreateTableStatement)new SQLParser((s)).parseStatement();
     		PrimaryKeyConstraint pkConstraint = stmt.getPrimaryKeyConstraint();
     		List<Pair<ColumnName,SortOrder>> columns = pkConstraint.getColumnNames();
     		assertEquals(2, columns.size());
@@ -429,7 +449,7 @@ public class QueryParserTest {
         for (String leadingComma : new String[]{",", ""}) {
             String s = "create table core.entity_history_archive (id CHAR(15), name VARCHAR(150)${o} constraint pk primary key (id))".replace("${o}", leadingComma);
 
-            CreateTableStatement stmt = (CreateTableStatement)new SQLParser(new StringReader(s)).parseStatement();
+            CreateTableStatement stmt = (CreateTableStatement)new SQLParser((s)).parseStatement();
 
             assertEquals(2, stmt.getColumnDefs().size());
             assertNotNull(stmt.getPrimaryKeyConstraint());
@@ -438,11 +458,11 @@ public class QueryParserTest {
 
     @Test
     public void testInvalidTrailingCommaOnCreateTable() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "create table foo (c1 varchar primary key, c2 varchar,)"));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode(), e.getErrorCode());
@@ -451,42 +471,42 @@ public class QueryParserTest {
 
     @Test
 	public void testCreateSequence() throws Exception {
-		SQLParser parser = new SQLParser(new StringReader(
+		String sql = ((
 				"create sequence foo.bar\n" + 
 						"start with 0\n"	+ 
 						"increment by 1\n"));
-		parser.parseStatement();
+		parseQuery(sql);
 	}
 	
 	@Test
 	public void testNextValueForSelect() throws Exception {
-		SQLParser parser = new SQLParser(new StringReader(
+		String sql = ((
 				"select next value for foo.bar \n" + 
 						"from core.custom_entity_data\n"));						
-		parser.parseStatement();
+		parseQuery(sql);
 	}
 	
 	@Test
     public void testNextValueForWhere() throws Exception {
-        SQLParser parser = new SQLParser(new StringReader(
+        String sql = ((
                 "upsert into core.custom_entity_data\n" + 
                         "select next value for foo.bar from core.custom_entity_data\n"));                    
-        parser.parseStatement();
+        parseQuery(sql);
     }
 	
     public void testBadCharDef() throws Exception {
         try {
-            SQLParser parser = new SQLParser("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + 
+            String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + 
                     "  (pk VARCHAR NOT NULL PRIMARY KEY, col CHAR(0))");
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught bad char definition.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 208 (22003): CHAR or VARCHAR must have a positive length. columnName=COL"));
         }
         try {
-            SQLParser parser = new SQLParser("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + 
+            String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + 
                     "  (pk VARCHAR NOT NULL PRIMARY KEY, col CHAR)");
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught bad char definition.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 207 (22003): Missing length for CHAR. columnName=COL"));
@@ -496,9 +516,9 @@ public class QueryParserTest {
     @Test
     public void testBadVarcharDef() throws Exception {
         try {
-            SQLParser parser = new SQLParser("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + 
+            String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + 
                     "  (pk VARCHAR NOT NULL PRIMARY KEY, col VARCHAR(0))");
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught bad varchar definition.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 208 (22003): CHAR or VARCHAR must have a positive length. columnName=COL"));
@@ -508,17 +528,17 @@ public class QueryParserTest {
     @Test
     public void testBadDecimalDef() throws Exception {
         try {
-            SQLParser parser = new SQLParser("CREATE TABLE IF NOT EXISTS testBadDecimalDef" + 
+            String sql = ("CREATE TABLE IF NOT EXISTS testBadDecimalDef" + 
                     "  (pk VARCHAR NOT NULL PRIMARY KEY, col DECIMAL(0, 5))");
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught bad decimal definition.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 209 (22003): Decimal precision outside of range. Should be within 1 and 38. columnName=COL"));
         }
         try {
-            SQLParser parser = new SQLParser("CREATE TABLE IF NOT EXISTS testBadDecimalDef" + 
+            String sql = ("CREATE TABLE IF NOT EXISTS testBadDecimalDef" + 
                     "  (pk VARCHAR NOT NULL PRIMARY KEY, col DECIMAL(40, 5))");
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught bad decimal definition.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 209 (22003): Decimal precision outside of range. Should be within 1 and 38. columnName=COL"));
@@ -528,17 +548,17 @@ public class QueryParserTest {
     @Test
     public void testBadBinaryDef() throws Exception {
         try {
-            SQLParser parser = new SQLParser("CREATE TABLE IF NOT EXISTS testBadBinaryDef" + 
+            String sql = ("CREATE TABLE IF NOT EXISTS testBadBinaryDef" + 
                     "  (pk VARCHAR NOT NULL PRIMARY KEY, col BINARY(0))");
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught bad binary definition.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 211 (22003): BINARY must have a positive length. columnName=COL"));
         }
         try {
-            SQLParser parser = new SQLParser("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + 
+            String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + 
                     "  (pk VARCHAR NOT NULL PRIMARY KEY, col BINARY)");
-            parser.parseStatement();
+            parseQuery(sql);
             fail("Should have caught bad char definition.");
         } catch (SQLException e) {
             assertTrue(e.getMessage(), e.getMessage().contains("ERROR 210 (22003): Missing length for BINARY. columnName=COL"));
@@ -547,67 +567,67 @@ public class QueryParserTest {
 
     @Test
     public void testPercentileQuery1() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select PERCENTILE_CONT(0.9) WITHIN GROUP (ORDER BY salary DESC) from core.custom_index_value ind"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testPercentileQuery2() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY mark ASC) from core.custom_index_value ind"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
     
     @Test
     public void testRowValueConstructorQuery() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select a_integer FROM aTable where (x_integer, y_integer) > (3, 4)"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testSingleTopLevelNot() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where not c = 5"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testTopLevelNot() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where not c"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testRVCInList() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where k in ( (1,2), (3,4) )"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testInList() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where k in ( 1,2 )"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testInvalidSelectStar() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select *,k from t where k in ( 1,2 )"));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode(), e.getErrorCode());
@@ -616,11 +636,11 @@ public class QueryParserTest {
 
     @Test
     public void testInvalidUpsertSelectHint() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "upsert into t select /*+ NO_INDEX */ k from t where k in ( 1,2 )"));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode());
@@ -629,11 +649,11 @@ public class QueryParserTest {
 
     @Test
     public void testTableNameStartsWithUnderscore() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select* from _t where k in ( 1,2 )"));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode());
@@ -642,16 +662,16 @@ public class QueryParserTest {
 
     @Test
     public void testValidUpsertSelectHint() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "upsert /*+ NO_INDEX */ into t select k from t where k in ( 1,2 )"));
-            parser.parseStatement();
+            parseQuery(sql);
     }
 
     @Test
     public void testHavingWithNot() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select\n" + 
                         "\"WEB_STAT_ALIAS\".\"DOMAIN\" as \"c0\"\n" + 
                         "from \"WEB_STAT\" \"WEB_STAT_ALIAS\"\n" + 
@@ -668,73 +688,73 @@ public class QueryParserTest {
                         ")\n" + 
                         "order by CASE WHEN \"WEB_STAT_ALIAS\".\"DOMAIN\" IS NULL THEN 1 ELSE 0 END,\n" + 
                         "\"WEB_STAT_ALIAS\".\"DOMAIN\" ASC"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testToDateInList() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader("select * from date_test where d in (to_date('2013-11-04 09:12:00'))"));
-        parser.parseStatement();
+        String sql = (
+                ("select * from date_test where d in (to_date('2013-11-04 09:12:00'))"));
+        parseQuery(sql);
     }
     
     @Test
     public void testDateLiteral() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where d = DATE '2013-11-04 09:12:00'"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testTimeLiteral() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where d = TIME '2013-11-04 09:12:00'"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
 
     @Test
     public void testTimestampLiteral() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where d = TIMESTAMP '2013-11-04 09:12:00'"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
     
     @Test
     public void testUnsignedDateLiteral() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where d = UNSIGNED_DATE '2013-11-04 09:12:00'"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
     @Test
     public void testUnsignedTimeLiteral() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where d = UNSIGNED_TIME '2013-11-04 09:12:00'"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
 
 
     @Test
     public void testUnsignedTimestampLiteral() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where d = UNSIGNED_TIMESTAMP '2013-11-04 09:12:00'"));
-        parser.parseStatement();
+        parseQuery(sql);
     }
     
     @Test
     public void testUnknownLiteral() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where d = FOO '2013-11-04 09:12:00'"));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), e.getErrorCode());
@@ -743,14 +763,32 @@ public class QueryParserTest {
     
     @Test
     public void testUnsupportedLiteral() throws Exception {
-        SQLParser parser = new SQLParser(
-                new StringReader(
+        String sql = (
+                (
                         "select * from t where d = DECIMAL '2013-11-04 09:12:00'"));
         try {
-            parser.parseStatement();
+            parseQuery(sql);
             fail();
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode());
         }
     }
+    
+    @Test
+    public void testAnyElementExpression1() throws Exception {
+        String sql = "select * from t where 'a' = ANY(a)";
+        parseQuery(sql);
+    }
+
+    @Test
+    public void testAnyElementExpression2() throws Exception {
+        String sql = "select * from t where 'a' <= ANY(a-b+1)";
+        parseQuery(sql);
+    }
+
+    @Test
+    public void testAllElementExpression() throws Exception {
+        String sql = "select * from t where 'a' <= ALL(a-b+1)";
+        parseQuery(sql);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
index 8f17a7c..abaaeb5 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
@@ -32,17 +32,23 @@ import static org.apache.phoenix.util.TestUtil.PTSDB3_NAME;
 import static org.apache.phoenix.util.TestUtil.PTSDB_NAME;
 import static org.apache.phoenix.util.TestUtil.TABLE_WITH_ARRAY;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.sql.DriverManager;
+import java.sql.SQLException;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
+import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
+import org.apache.phoenix.parse.BindableStatement;
+import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
@@ -146,4 +152,21 @@ public class BaseConnectionlessQueryTest extends BaseTest {
         }
     }
 
+    protected static void assertRoundtrip(String sql) throws SQLException {
+        SQLParser parser = new SQLParser(sql);
+        BindableStatement stmt = null;
+        stmt = parser.parseStatement();
+        if (stmt.getOperation() != Operation.QUERY) {
+            return;
+        }
+        String newSQL = stmt.toString();
+        SQLParser newParser = new SQLParser(newSQL);
+        BindableStatement newStmt = null;
+        try {
+            newStmt = newParser.parseStatement();
+        } catch (SQLException e) {
+            fail("Unable to parse new:\n" + newSQL);
+        }
+        assertEquals("Expected equality:\n" + sql + "\n" + newSQL, stmt, newStmt);
+    }
 }
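
The parseQuery() helper in QueryParserTest and assertRoundtrip() above encode the same check that PHOENIX-1646 targets: a statement's toString() must be lossless, so re-parsing the stringified form has to yield an equal statement. Condensed, with a made-up query, the idiom is:

    SQLParser parser = new SQLParser("select k, substr(v, 1, 3) from t where k > 5");
    BindableStatement stmt = parser.parseStatement();
    String newSQL = stmt.toString();                                    // stringify
    BindableStatement reparsed = new SQLParser(newSQL).parseStatement(); // re-parse
    assertEquals(stmt, reparsed);  // fails if toString() dropped information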

http://git-wip-us.apache.org/repos/asf/phoenix/blob/abeaa74a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
index 321567c..90730bc 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
@@ -1746,4 +1746,11 @@ public class PDataTypeTest {
              coercibleToMap.toString());
     }
     
+    @Test
+    public void testIntVersusLong() {
+        long l = -1L;
+        int i = -1;
+        assertTrue(PLong.INSTANCE.compareTo(l, i, PInteger.INSTANCE)==0);
+        assertTrue(PInteger.INSTANCE.compareTo(i, l, PLong.INSTANCE)==0);
+    }
 }


[35/50] [abbrv] phoenix git commit: PHOENIX-1669 Views are not always properly updated with physical table indexes when cached

Posted by ma...@apache.org.
PHOENIX-1669 Views are not always properly updated with physical table indexes when cached


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c633151d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c633151d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c633151d

Branch: refs/heads/calcite
Commit: c633151dafcaf703c8daa5f92dbaddd7eeb89891
Parents: 2e5a630
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Feb 17 19:37:02 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Feb 17 19:37:02 2015 -0800

----------------------------------------------------------------------
 .../end2end/index/IndexExpressionIT.java        |  3 --
 .../apache/phoenix/schema/MetaDataClient.java   | 39 ++++++++++++--------
 .../org/apache/phoenix/schema/PTableImpl.java   |  2 +-
 3 files changed, 24 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c633151d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
index 5c51bda..1e3733b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -1220,8 +1220,6 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
         assertEquals(1, rs.getLong(1));
         assertFalse(rs.next());
         
-        conn.createStatement().execute("CREATE INDEX vi1 on v(k2)");
-
         //i2 should be used since it contains s3||'_'||s4 i
         String query = "SELECT s2||'_'||s3 FROM v WHERE k2=1 AND (s2||'_'||s3)='abc_cab'";
         rs = conn.createStatement(  ).executeQuery("EXPLAIN " + query);
@@ -1235,7 +1233,6 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
         assertFalse(rs.next());
         
         conn.createStatement().execute("ALTER VIEW v DROP COLUMN s4");
-        conn.createStatement().execute("CREATE INDEX vi2 on v(k2)");
         //i2 cannot be used since s4 has been dropped from the view, so i1 will be used 
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         queryPlan = QueryUtil.getExplainPlan(rs);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c633151d/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 64e62f5..831616b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -360,8 +360,7 @@ public class MetaDataClient {
                 // Otherwise, a tenant would be required to create a VIEW first
                 // which is not really necessary unless you want to filter or add
                 // columns
-                addIndexesFromPhysicalTable(result);
-                connection.addTable(result.getTable());
+                addTableToCache(result);
                 return result;
             } else {
                 // if (result.getMutationCode() == MutationCode.NEWER_TABLE_FOUND) {
@@ -369,16 +368,20 @@ public class MetaDataClient {
                 // Since we disallow creation or modification of a table earlier than the latest
                 // timestamp, we can handle this such that we don't ask the
                 // server again.
-                // If table was not found at the current time stamp and we have one cached, remove it.
-                // Otherwise, we're up to date, so there's nothing to do.
                 if (table != null) {
+                    // Ensures that table in result is set to table found in our cache.
                     result.setTable(table);
                     if (code == MutationCode.TABLE_ALREADY_EXISTS) {
+                        // Although this table is up-to-date, the parent table may not be.
+                        // In this case, we update the parent table which may in turn pull
+                        // in indexes to add to this table.
                         if (addIndexesFromPhysicalTable(result)) {
                             connection.addTable(result.getTable());
                         }
                         return result;
                     }
+                    // If table was not found at the current time stamp and we have one cached, remove it.
+                    // Otherwise, we're up to date, so there's nothing to do.
                     if (code == MutationCode.TABLE_NOT_FOUND && tryCount + 1 == maxTryCount) {
                         connection.removeTable(tenantId, fullTableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                     }
@@ -1683,7 +1686,7 @@ public class MetaDataClient {
             MutationCode code = result.getMutationCode();
             switch(code) {
             case TABLE_ALREADY_EXISTS:
-                connection.addTable(result.getTable());
+                addTableToCache(result);
                 if (!statement.ifNotExists()) {
                     throw new TableAlreadyExistsException(schemaName, tableName, result.getTable());
                 }
@@ -1698,7 +1701,7 @@ public class MetaDataClient {
                 throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
                     .setSchemaName(schemaName).setTableName(tableName).build().buildException();
             case CONCURRENT_TABLE_MUTATION:
-                connection.addTable(result.getTable());
+                addTableToCache(result);
                 throw new ConcurrentTableMutationException(schemaName, tableName);
             default:
                 PName newSchemaName = PNameFactory.newName(schemaName);
@@ -1708,12 +1711,8 @@ public class MetaDataClient {
                         dataTableName == null ? null : newSchemaName, dataTableName == null ? null : PNameFactory.newName(dataTableName), Collections.<PTable>emptyList(), isImmutableRows,
                         physicalNames, defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, storeNulls, viewType,
                         indexId, indexType);
-                connection.addTable(table);
-                if (tableType == PTableType.VIEW) {
-                    // Set wasUpdated to true to force attempt to add
-                    // indexes from physical table to view.
-                    addIndexesFromPhysicalTable(new MetaDataMutationResult(code, result.getMutationTime(), table, true));
-                }
+                result = new MetaDataMutationResult(code, result.getMutationTime(), table, true);
+                addTableToCache(result);
                 return table;
             }
         } finally {
@@ -1958,12 +1957,13 @@ public class MetaDataClient {
         case COLUMN_NOT_FOUND:
             break;
         case CONCURRENT_TABLE_MUTATION:
-            connection.addTable(result.getTable());
+            addTableToCache(result);
             if (logger.isDebugEnabled()) {
                 logger.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection));
             }
             throw new ConcurrentTableMutationException(schemaName, tableName);
         case NEWER_TABLE_FOUND:
+            // TODO: update cache?
 //            if (result.getTable() != null) {
 //                connection.addTable(result.getTable());
 //            }
@@ -2254,7 +2254,7 @@ public class MetaDataClient {
                 try {
                     MutationCode code = processMutationResult(schemaName, tableName, result);
                     if (code == MutationCode.COLUMN_ALREADY_EXISTS) {
-                        connection.addTable(result.getTable());
+                        addTableToCache(result);
                         if (!statement.ifNotExists()) {
                             throw new ColumnAlreadyExistsException(schemaName, tableName, SchemaUtil.findExistingColumn(result.getTable(), columns));
                         }
@@ -2493,7 +2493,7 @@ public class MetaDataClient {
                 try {
                     MutationCode code = processMutationResult(schemaName, tableName, result);
                     if (code == MutationCode.COLUMN_NOT_FOUND) {
-                        connection.addTable(result.getTable());
+                        addTableToCache(result);
                         if (!statement.ifExists()) {
                             throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
                         }
@@ -2603,7 +2603,7 @@ public class MetaDataClient {
             }
             if (code == MutationCode.TABLE_ALREADY_EXISTS) {
                 if (result.getTable() != null) { // To accommodate connection-less update of index state
-                    connection.addTable(result.getTable());
+                    addTableToCache(result);
                 }
             }
             if (newIndexState == PIndexState.BUILDING) {
@@ -2633,6 +2633,13 @@ public class MetaDataClient {
         }
     }
 
+    private PTable addTableToCache(MetaDataMutationResult result) throws SQLException {
+        addIndexesFromPhysicalTable(result);
+        PTable table = result.getTable();
+        connection.addTable(table);
+        return table;
+    }
+    
     private void throwIfAlteringViewPK(ColumnDef col, PTable table) throws SQLException {
         if (col != null && col.isPK() && table.getType() == PTableType.VIEW) {
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MODIFY_VIEW_PK)

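The net effect of the MetaDataClient changes above is a single choke point for cache writes: addTableToCache() first calls addIndexesFromPhysicalTable(result) and only then connection.addTable(), so branches like TABLE_ALREADY_EXISTS and CONCURRENT_TABLE_MUTATION can no longer cache a view without its physical table's indexes. Condensed from the diff:

    // Before: call sites cached the raw server result directly.
    //     connection.addTable(result.getTable());
    // After: one helper fixes up indexes before anything reaches the cache.
    //     addTableToCache(result);  // addIndexesFromPhysicalTable(result), then addTable()

(The PTableImpl change below is related: cloning a table just to attach indexes no longer bumps its sequence number, presumably because no schema change has actually occurred.)
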
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c633151d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 08f74b7..e14565d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -190,7 +190,7 @@ public class PTableImpl implements PTable {
     public static PTableImpl makePTable(PTable table, long timeStamp, List<PTable> indexes, PName parentSchemaName, String viewStatement) throws SQLException {
         return new PTableImpl(
                 table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), timeStamp,
-                table.getSequenceNumber() + 1, table.getPKName(), table.getBucketNum(), getColumnsToClone(table), parentSchemaName, table.getParentTableName(),
+                table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), getColumnsToClone(table), parentSchemaName, table.getParentTableName(),
                 indexes, table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), viewStatement,
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats());
     }


[23/50] [abbrv] phoenix git commit: PHOENIX-688 Add to_time and to_timestamp built-in functions

Posted by ma...@apache.org.
PHOENIX-688 Add to_time and to_timestamp built-in functions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/11a76b29
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/11a76b29
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/11a76b29

Branch: refs/heads/calcite
Commit: 11a76b297fad46cd7f51019810ba4d1a7b51b418
Parents: 78d91d1
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Feb 7 23:54:53 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Feb 9 18:37:14 2015 -0800

----------------------------------------------------------------------
 .../phoenix/end2end/ProductMetricsIT.java       |   4 +-
 .../phoenix/end2end/ToDateFunctionIT.java       |  67 +++++-
 .../phoenix/end2end/TruncateFunctionIT.java     |   5 +-
 .../apache/phoenix/end2end/UpsertValuesIT.java  |   6 +-
 .../phoenix/end2end/VariableLengthPKIT.java     |   3 +-
 .../phoenix/mapreduce/CsvBulkLoadToolIT.java    |  10 +-
 .../phoenix/compile/StatementContext.java       |  28 ++-
 .../phoenix/expression/ExpressionType.java      |  10 +-
 .../expression/function/ToDateFunction.java     |  93 ++++++--
 .../expression/function/ToTimeFunction.java     |  63 ++++++
 .../function/ToTimestampFunction.java           |  63 ++++++
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  20 +-
 .../apache/phoenix/parse/ToDateParseNode.java   |   6 +-
 .../apache/phoenix/parse/ToTimeParseNode.java   |  48 +++++
 .../phoenix/parse/ToTimestampParseNode.java     |  48 +++++
 .../org/apache/phoenix/query/QueryServices.java |   3 +
 .../org/apache/phoenix/schema/types/PDate.java  |  12 +-
 .../org/apache/phoenix/schema/types/PTime.java  |  16 +-
 .../apache/phoenix/schema/types/PTimestamp.java |  24 +--
 .../java/org/apache/phoenix/util/DateUtil.java  | 213 +++++++++++++------
 .../phoenix/util/csv/CsvUpsertExecutor.java     |  52 +++--
 .../phoenix/compile/WhereCompilerTest.java      |   8 +-
 .../expression/SortOrderExpressionTest.java     |  34 +--
 .../org/apache/phoenix/util/DateUtilTest.java   |  35 +--
 24 files changed, 656 insertions(+), 215 deletions(-)
----------------------------------------------------------------------
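
For a quick end-to-end sense of the new built-ins, here is a minimal JDBC sketch modeled on the ToDateFunctionIT changes below. The connection URL is an assumption, and SELECT ... FROM SYSTEM.CATALOG LIMIT 1 is just a convenient one-row source, exactly as the tests use it:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ToTimeUsageSketch {
        public static void main(String[] args) throws Exception {
            // URL is an assumption for illustration.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // Each function now returns its own java.sql type.
                ResultSet rs = stmt.executeQuery(
                        "SELECT TO_DATE('1970-01-01 00:00:00.000')," +
                        "       TO_TIME('1970-01-01 00:00:00.000')," +
                        "       TO_TIMESTAMP('1970-01-01 00:00:00.000')" +
                        " FROM SYSTEM.CATALOG LIMIT 1");
                rs.next();
                System.out.println(rs.getDate(1));      // java.sql.Date
                System.out.println(rs.getTime(2));      // java.sql.Time
                System.out.println(rs.getTimestamp(3)); // java.sql.Timestamp
            }
        }
    }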


http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
index 975541e..ddc5fab 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
@@ -31,8 +31,6 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
-import java.text.Format;
-import java.text.ParseException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
@@ -87,7 +85,7 @@ public class ProductMetricsIT extends BaseClientManagedTimeIT {
     }
     
     private static Date toDate(String dateString) {
-        return DateUtil.parseDateTime(dateString);
+        return DateUtil.parseDate(dateString);
     }
     
     private static void initTable(byte[][] splits, long ts) throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
index 984e21b..bda4ea5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
@@ -18,24 +18,25 @@
 
 package org.apache.phoenix.end2end;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.sql.Connection;
 import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
 import java.util.Properties;
 
 import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.DateUtil;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 
 public class ToDateFunctionIT extends BaseHBaseManagedTimeIT {
 
@@ -53,18 +54,26 @@ public class ToDateFunctionIT extends BaseHBaseManagedTimeIT {
         conn.close();
     }
 
-    private static Date callToDateFunction(Connection conn, String invocation) throws SQLException {
+    private static java.util.Date callToDateFunction(Connection conn, String invocation) throws SQLException {
         Statement stmt = conn.createStatement();
-        ResultSet rs = stmt.executeQuery(String.format("SELECT %s FROM SYSTEM.CATALOG", invocation));
+        ResultSet rs = stmt.executeQuery(String.format("SELECT %s FROM SYSTEM.CATALOG LIMIT 1", invocation));
         assertTrue(rs.next());
-        Date returnValue = rs.getDate(1);
+        java.util.Date returnValue = (java.util.Date)rs.getObject(1);
         rs.close();
         stmt.close();
         return returnValue;
     }
 
     private Date callToDateFunction(String invocation) throws SQLException {
-        return callToDateFunction(conn, invocation);
+        return (Date)callToDateFunction(conn, invocation);
+    }
+
+    private Time callToTimeFunction(String invocation) throws SQLException {
+        return (Time)callToDateFunction(conn, invocation);
+    }
+
+    private Timestamp callToTimestampFunction(String invocation) throws SQLException {
+        return (Timestamp)callToDateFunction(conn, invocation);
     }
 
     @Test
@@ -87,6 +96,44 @@ public class ToDateFunctionIT extends BaseHBaseManagedTimeIT {
     }
 
     @Test
+    public void testToTime_Default() throws SQLException {
+        // Default time zone is GMT, so this is timestamp 0
+        assertEquals(0L, callToTimeFunction("TO_TIME('1970-01-01 00:00:00')").getTime());
+        assertEquals(0L, callToTimeFunction("TO_TIME('1970-01-01 00:00:00.000')").getTime());
+        assertEquals(0L, callToTimeFunction("TO_TIME('1970-01-01')").getTime());
+        assertEquals(0L, callToTimeFunction("TO_TIME('1970/01/01','yyyy/MM/dd')").getTime());
+
+        // Test other ISO 8601 Date Compliant Formats to verify they can be parsed
+        try {
+            callToTimeFunction("TO_TIME('2015-01-27T16:17:57+00:00')");
+            callToTimeFunction("TO_TIME('2015-01-27T16:17:57Z')");
+            callToTimeFunction("TO_TIME('2015-W05')");
+            callToTimeFunction("TO_TIME('2015-W05-2')");
+        } catch (Exception ex) {
+            fail("TO_TIME Parse ISO8601 Time Failed due to:" + ex);
+        }
+    }
+
+    @Test
+    public void testToTimestamp_Default() throws SQLException {
+        // Default time zone is GMT, so this is timestamp 0
+        assertEquals(0L, callToTimestampFunction("TO_TIMESTAMP('1970-01-01 00:00:00')").getTime());
+        assertEquals(0L, callToTimestampFunction("TO_TIMESTAMP('1970-01-01 00:00:00.000')").getTime());
+        assertEquals(0L, callToTimestampFunction("TO_TIMESTAMP('1970-01-01')").getTime());
+        assertEquals(0L, callToTimestampFunction("TO_TIMESTAMP('1970/01/01','yyyy/MM/dd')").getTime());
+
+        // Test other ISO 8601 Date Compliant Formats to verify they can be parsed
+        try {
+            callToTimestampFunction("TO_TIMESTAMP('2015-01-27T16:17:57+00:00')");
+            callToTimestampFunction("TO_TIMESTAMP('2015-01-27T16:17:57Z')");
+            callToTimestampFunction("TO_TIMESTAMP('2015-W05')");
+            callToTimestampFunction("TO_TIMESTAMP('2015-W05-2')");
+        } catch (Exception ex) {
+            fail("TO_TIMESTAMP Parse ISO8601 Time Failed due to:" + ex);
+        }
+    }
+
+    @Test
     public void testToDate_CustomDateFormat() throws SQLException {
         // A date without time component is at midnight
         assertEquals(0L, callToDateFunction("TO_DATE('1970-01-01', 'yyyy-MM-dd')").getTime());
@@ -115,7 +162,7 @@ public class ToDateFunctionIT extends BaseHBaseManagedTimeIT {
 
         assertEquals(
                 -ONE_HOUR_IN_MILLIS,
-                callToDateFunction(customTimeZoneConn, "TO_DATE('1970-01-01 00:00:00')").getTime());
+                callToDateFunction(customTimeZoneConn, "TO_DATE('1970-01-01 00:00:00.000')").getTime());
     }
 
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java
index 59c499d..9e8f2c0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java
@@ -32,7 +32,6 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.Timestamp;
-import java.text.Format;
 import java.text.ParseException;
 import java.util.Properties;
 
@@ -47,11 +46,11 @@ public class TruncateFunctionIT extends BaseClientManagedTimeIT {
     private static final String DS3 = "1970-01-30 01:30:24.353";
     
     private static Date toDate(String s) throws ParseException {
-        return DateUtil.parseDateTime(s);
+        return DateUtil.parseDate(s);
     }
     
     private static Timestamp toTimestamp(String s) throws ParseException {
-        return new Timestamp((DateUtil.parseDateTime(s)).getTime());
+        return DateUtil.parseTimestamp(s);
     }
     
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
index b44fbff..8e07af5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
@@ -33,8 +33,8 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.sql.Timestamp;
 import java.sql.Time;
+import java.sql.Timestamp;
 import java.util.Properties;
 
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -156,7 +156,7 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT {
         ResultSet rs = conn.createStatement().executeQuery("select k,to_char(date) from UpsertDateTest");
         assertTrue(rs.next());
         assertEquals("a", rs.getString(1));
-        assertEquals("2013-06-08 00:00:00", rs.getString(2));
+        assertEquals("2013-06-08 00:00:00.000", rs.getString(2));
     }
 
     @Test
@@ -548,7 +548,7 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT {
     }
     
     private static Date toDate(String dateString) {
-        return DateUtil.parseDateTime(dateString);
+        return DateUtil.parseDate(dateString);
     }
     
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
index 0d9aeb2..417d147 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
@@ -38,7 +38,6 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
 import java.text.Format;
-import java.text.ParseException;
 import java.util.Properties;
 
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -55,7 +54,7 @@ public class VariableLengthPKIT extends BaseClientManagedTimeIT {
     private static final Date D1 = toDate(DS1);
 
     private static Date toDate(String dateString) {
-        return DateUtil.parseDateTime(dateString);
+        return DateUtil.parseDate(dateString);
     }
 
     protected static void initGroupByRowKeyColumns(long ts) throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
index 00968ae..392395d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
@@ -44,12 +44,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 @Category(NeedsOwnMiniClusterTest.class)
 public class CsvBulkLoadToolIT {
 
@@ -119,11 +113,11 @@ public class CsvBulkLoadToolIT {
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
         assertEquals("Name 1", rs.getString(2));
-        assertEquals(DateUtil.parseDateTime("1970-01-01"), rs.getDate(3));
+        assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
         assertTrue(rs.next());
         assertEquals(2, rs.getInt(1));
         assertEquals("Name 2", rs.getString(2));
-        assertEquals(DateUtil.parseDateTime("1970-01-02"), rs.getDate(3));
+        assertEquals(DateUtil.parseDate("1970-01-02"), rs.getDate(3));
         assertFalse(rs.next());
 
         rs.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index f48f613..d726488 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -60,8 +60,12 @@ public class StatementContext {
     private final ExpressionManager expressions;
     private final AggregationManager aggregates;
     private final String dateFormat;
-    private final TimeZone dateFormatTimeZone;
     private final Format dateFormatter;
+    private final String timeFormat;
+    private final Format timeFormatter;
+    private final String timestampFormat;
+    private final Format timestampFormatter;
+    private final TimeZone dateFormatTimeZone;
     private final String numberFormat;
     private final ImmutableBytesWritable tempPtr;
     private final PhoenixStatement statement;
@@ -99,9 +103,13 @@ public class StatementContext {
         this.expressions = new ExpressionManager();
         PhoenixConnection connection = statement.getConnection();
         this.dateFormat = connection.getQueryServices().getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
+        this.dateFormatter = DateUtil.getDateFormatter(dateFormat);
+        this.timeFormat = connection.getQueryServices().getProps().get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
+        this.timeFormatter = DateUtil.getTimeFormatter(timeFormat);
+        this.timestampFormat = connection.getQueryServices().getProps().get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT);
+        this.timestampFormatter = DateUtil.getTimestampFormatter(timestampFormat);
         this.dateFormatTimeZone = TimeZone.getTimeZone(
                 connection.getQueryServices().getProps().get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, DateUtil.DEFAULT_TIME_ZONE_ID));
-        this.dateFormatter = DateUtil.getDateFormatter(dateFormat);
         this.numberFormat = connection.getQueryServices().getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
         this.tempPtr = new ImmutableBytesWritable();
         this.currentTable = resolver != null && !resolver.getTables().isEmpty() ? resolver.getTables().get(0) : null;
@@ -151,6 +159,22 @@ public class StatementContext {
         return dateFormatter;
     }
 
+    public String getTimeFormat() {
+        return timeFormat;
+    }
+
+    public Format getTimeFormatter() {
+        return timeFormatter;
+    }
+
+    public String getTimestampFormat() {
+        return timestampFormat;
+    }
+
+    public Format getTimestampFormatter() {
+        return timestampFormatter;
+    }
+
     public String getNumberFormat() {
         return numberFormat;
     }
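
StatementContext now resolves a separate pattern for each of the three temporal types. A hedged sketch of overriding them per connection, assuming (as Phoenix does for these attributes) that client-supplied connection properties are merged into the query services configuration; the URL is an assumption:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class FormatPropsSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("phoenix.query.dateFormat", "yyyy-MM-dd");
            props.setProperty("phoenix.query.timeFormat", "HH:mm:ss");
            props.setProperty("phoenix.query.timestampFormat", "yyyy-MM-dd HH:mm:ss.SSS");
            // URL is an assumption; the properties feed the per-type formatters above.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                // TO_CHAR and string rendering now use a distinct pattern per type.
            }
        }
    }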

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 706a751..cbbfe4a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -21,6 +21,7 @@ import java.util.Map;
 
 import org.apache.phoenix.expression.function.ArrayAllComparisonExpression;
 import org.apache.phoenix.expression.function.ArrayAnyComparisonExpression;
+import org.apache.phoenix.expression.function.ArrayElemRefExpression;
 import org.apache.phoenix.expression.function.ArrayIndexFunction;
 import org.apache.phoenix.expression.function.ArrayLengthFunction;
 import org.apache.phoenix.expression.function.CeilDateExpression;
@@ -40,7 +41,6 @@ import org.apache.phoenix.expression.function.FloorDateExpression;
 import org.apache.phoenix.expression.function.FloorDecimalExpression;
 import org.apache.phoenix.expression.function.FloorFunction;
 import org.apache.phoenix.expression.function.IndexStateNameFunction;
-import org.apache.phoenix.expression.function.ArrayElemRefExpression;
 import org.apache.phoenix.expression.function.InvertFunction;
 import org.apache.phoenix.expression.function.LTrimFunction;
 import org.apache.phoenix.expression.function.LastValueFunction;
@@ -76,6 +76,8 @@ import org.apache.phoenix.expression.function.TimezoneOffsetFunction;
 import org.apache.phoenix.expression.function.ToCharFunction;
 import org.apache.phoenix.expression.function.ToDateFunction;
 import org.apache.phoenix.expression.function.ToNumberFunction;
+import org.apache.phoenix.expression.function.ToTimeFunction;
+import org.apache.phoenix.expression.function.ToTimestampFunction;
 import org.apache.phoenix.expression.function.TrimFunction;
 import org.apache.phoenix.expression.function.TruncFunction;
 import org.apache.phoenix.expression.function.UpperFunction;
@@ -187,7 +189,11 @@ public enum ExpressionType {
     ModulusExpression(ModulusExpression.class),
     DistinctValueAggregateFunction(DistinctValueAggregateFunction.class),
     RegexpSplitFunctiond(RegexpSplitFunction.class),
-    RandomFunction(RandomFunction.class);
+    RandomFunction(RandomFunction.class),
+    ToTimeFunction(ToTimeFunction.class),
+    ToTimestampFunction(ToTimestampFunction.class),
+    ;
+    
     ExpressionType(Class<? extends Expression> clazz) {
         this.clazz = clazz;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java
index 73ca3ed..01b0dfd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java
@@ -17,24 +17,23 @@
  */
 package org.apache.phoenix.expression.function;
 
-import java.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
 import java.sql.SQLException;
-import java.text.Format;
-import java.text.ParseException;
 import java.util.List;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.WritableUtils;
-
 import org.apache.phoenix.expression.Expression;
-import org.apache.phoenix.parse.*;
+import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.parse.FunctionParseNode.Argument;
 import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
-import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.parse.ToDateParseNode;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.DateUtil;
 
 
@@ -46,7 +45,6 @@ import org.apache.phoenix.util.DateUtil;
  * valid (constant) timezone id, or the string "local". The third argument is also optional, and
  * it defaults to GMT.
  *
- * @since 0.1
  */
 @BuiltInFunction(name=ToDateFunction.NAME, nodeClass=ToDateParseNode.class,
         args={@Argument(allowedTypes={PVarchar.class}),
@@ -56,33 +54,47 @@ public class ToDateFunction extends ScalarFunction {
     public static final String NAME = "TO_DATE";
     private DateUtil.DateTimeParser dateParser;
     private String dateFormat;
+    private String timeZoneId;
 
     public ToDateFunction() {
     }
 
-    public ToDateFunction(List<Expression> children, String dateFormat, DateUtil.DateTimeParser dateParser) throws SQLException {
-        super(children.subList(0, 1));
+    public ToDateFunction(List<Expression> children, String dateFormat, String timeZoneId) throws SQLException {
+        super(children);
+        init(dateFormat, timeZoneId);
+    }
+    
+    private void init(String dateFormat, String timeZoneId) {
         this.dateFormat = dateFormat;
-        this.dateParser = dateParser;
+        this.dateParser = DateUtil.getDateTimeParser(dateFormat, getDataType(), timeZoneId);
+        // Store resolved timeZoneId, as if it's LOCAL, we don't want the
+        // server to evaluate using the local time zone. Instead, we want
+        // to use the client local time zone.
+        this.timeZoneId = this.dateParser.getTimeZone().getID();
     }
 
     @Override
     public int hashCode() {
         final int prime = 31;
-        int result = 1;
-        result = prime * result + dateFormat.hashCode();
-        result = prime * result + getExpression().hashCode();
+        int result = super.hashCode();
+        result = prime * result + ((dateFormat == null) ? 0 : dateFormat.hashCode());
+        result = prime * result + ((timeZoneId == null) ? 0 : timeZoneId.hashCode());
         return result;
     }
 
     @Override
     public boolean equals(Object obj) {
         if (this == obj) return true;
-        if (obj == null) return false;
         if (getClass() != obj.getClass()) return false;
         ToDateFunction other = (ToDateFunction)obj;
-        if (!getExpression().equals(other.getExpression())) return false;
-        if (!dateFormat.equals(other.dateFormat)) return false;
+        // Only compare first child, as the other two are potentially resolved on the fly.
+        if (!this.getChildren().get(0).equals(other.getChildren().get(0))) return false;
+        if (dateFormat == null) {
+            if (other.dateFormat != null) return false;
+        } else if (!dateFormat.equals(other.dateFormat)) return false;
+        if (timeZoneId == null) {
+            if (other.timeZoneId != null) return false;
+        } else if (!timeZoneId.equals(other.timeZoneId)) return false;
         return true;
     }
 
@@ -94,8 +106,10 @@ public class ToDateFunction extends ScalarFunction {
         }
         PDataType type = expression.getDataType();
         String dateStr = (String)type.toObject(ptr, expression.getSortOrder());
-        Object value = dateParser.parseDateTime(dateStr);
-        byte[] byteValue = getDataType().toBytes(value);
+        long epochTime = dateParser.parseDateTime(dateStr);
+        PDataType returnType = getDataType();
+        byte[] byteValue = new byte[returnType.getByteSize()];
+        returnType.getCodec().encodeLong(epochTime, byteValue, 0);
         ptr.set(byteValue);
         return true;
      }
@@ -110,17 +124,50 @@ public class ToDateFunction extends ScalarFunction {
         return getExpression().isNullable();
     }
 
+    private String getTimeZoneIdArg() {
+        return children.size() < 3 ? null : (String) ((LiteralExpression) children.get(2)).getValue();
+    }
+    
+    private String getDateFormatArg() {
+        return children.size() < 2 ? null : (String) ((LiteralExpression) children.get(1)).getValue();
+    }
+    
     @Override
     public void readFields(DataInput input) throws IOException {
         super.readFields(input);
-        dateFormat = WritableUtils.readString(input);
-        dateParser = DateUtil.getDateParser(dateFormat);
+        String timeZoneId;
+        String dateFormat = WritableUtils.readString(input);  
+        if (dateFormat.length() != 0) { // pre 4.3
+            timeZoneId = DateUtil.DEFAULT_TIME_ZONE_ID;         
+        } else {
+            int nChildren = children.size();
+            if (nChildren == 1) {
+                dateFormat = WritableUtils.readString(input); 
+                timeZoneId =  WritableUtils.readString(input);
+            } else if (nChildren == 2 || DateUtil.LOCAL_TIME_ZONE_ID.equalsIgnoreCase(getTimeZoneIdArg())) {
+                dateFormat = getDateFormatArg();
+                timeZoneId =  WritableUtils.readString(input);
+            } else {
+                dateFormat = getDateFormatArg();
+                timeZoneId =  getTimeZoneIdArg();
+            }
+        }
+        init(dateFormat, timeZoneId);
     }
 
     @Override
     public void write(DataOutput output) throws IOException {
         super.write(output);
-        WritableUtils.writeString(output, dateFormat);
+        WritableUtils.writeString(output, ""); // For b/w compat
+        int nChildren = children.size();
+        // If dateFormat and/or timeZoneId are supplied as children, don't write them again,
+        // except if using LOCAL, in which case we want to write the resolved/actual time zone.
+        if (nChildren == 1) {
+            WritableUtils.writeString(output, dateFormat);
+            WritableUtils.writeString(output, timeZoneId);
+        } else if (nChildren == 2 || DateUtil.LOCAL_TIME_ZONE_ID.equalsIgnoreCase(getTimeZoneIdArg())) {
+            WritableUtils.writeString(output, timeZoneId);
+        }
     }
 
     private Expression getExpression() {
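
The readFields/write pair above keeps serialization compatible with pre-4.3 readers while avoiding re-shipping values that already travel as literal children. A self-contained sketch of the write-side cases, using plain java.io with writeUTF standing in for Hadoop's WritableUtils.writeString (that substitution is an assumption for illustration):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ToDateWireFormatSketch {
        // Mirrors ToDateFunction.write(): an empty marker is written where pre-4.3
        // readers expected the format string, so old data still deserializes and
        // new readers know the trailing fields follow.
        static void write(DataOutput out, int nChildren, String dateFormat,
                          String timeZoneId, boolean tzArgIsLocal) throws IOException {
            out.writeUTF("");                     // b/w-compat marker ("" = post-4.3)
            if (nChildren == 1) {                 // neither format nor tz passed as args
                out.writeUTF(dateFormat);
                out.writeUTF(timeZoneId);
            } else if (nChildren == 2 || tzArgIsLocal) {
                out.writeUTF(timeZoneId);         // resolved zone, never the string "LOCAL"
            }                                     // else both travel as literal children
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            write(new DataOutputStream(bos), 1, "yyyy-MM-dd HH:mm:ss.SSS", "GMT", false);
            DataInput in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
            System.out.println("marker='" + in.readUTF() + "', format=" + in.readUTF()
                    + ", zone=" + in.readUTF());
        }
    }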

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToTimeFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToTimeFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToTimeFunction.java
new file mode 100644
index 0000000..3a26584
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToTimeFunction.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.parse.ToTimeParseNode;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PTime;
+import org.apache.phoenix.schema.types.PVarchar;
+
+/**
+*
+* Implementation of the {@code TO_TIME(<string>,[<format-string>,[<timezone-string>]])} built-in function.
+* The second argument is optional and defaults to the phoenix.query.timeFormat value
+* from the HBase config. If present it must be a constant string. The third argument is either a
+* valid (constant) timezone id, or the string "LOCAL". The third argument is also optional, and
+* it defaults to GMT.
+*
+*/
+@BuiltInFunction(name=ToTimeFunction.NAME, nodeClass=ToTimeParseNode.class,
+       args={@Argument(allowedTypes={PVarchar.class}),
+               @Argument(allowedTypes={PVarchar.class},isConstant=true,defaultValue="null"),
+               @Argument(allowedTypes={PVarchar.class}, isConstant=true, defaultValue = "null") } )
+public class ToTimeFunction extends ToDateFunction {
+    public static final String NAME = "TO_TIME";
+
+    public ToTimeFunction() {
+    }
+
+    public ToTimeFunction(List<Expression> children, String dateFormat, String timeZoneId) throws SQLException {
+        super(children, dateFormat, timeZoneId);
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PTime.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToTimestampFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToTimestampFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToTimestampFunction.java
new file mode 100644
index 0000000..17643a2
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToTimestampFunction.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.parse.ToTimestampParseNode;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.apache.phoenix.schema.types.PVarchar;
+
+/**
+*
+* Implementation of the {@code TO_TIMESTAMP(<string>,[<format-string>,[<timezone-string>]])} built-in function.
+* The second argument is optional and defaults to the phoenix.query.timestampFormat value
+* from the HBase config. If present it must be a constant string. The third argument is either a
+* valid (constant) timezone id, or the string "local". The third argument is also optional, and
+* it defaults to GMT.
+*
+*/
+@BuiltInFunction(name=ToTimestampFunction.NAME, nodeClass=ToTimestampParseNode.class,
+       args={@Argument(allowedTypes={PVarchar.class}),
+               @Argument(allowedTypes={PVarchar.class},isConstant=true,defaultValue="null"),
+               @Argument(allowedTypes={PVarchar.class}, isConstant=true, defaultValue = "null") } )
+public class ToTimestampFunction extends ToDateFunction {
+    public static final String NAME = "TO_TIMESTAMP";
+
+    public ToTimestampFunction() {
+    }
+
+    public ToTimestampFunction(List<Expression> children, String dateFormat, String timeZoneId) throws SQLException {
+        super(children, dateFormat, timeZoneId);
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PTimestamp.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+}
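
Both new functions take the same optional arguments as TO_DATE. A hedged sketch of the full three-argument form; the URL, the sample literal, and the zone id are assumptions:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ToTimestampArgsSketch {
        public static void main(String[] args) throws Exception {
            // TO_TIME(<string>[, <format>[, <timezone-id> | 'LOCAL']]); TO_TIMESTAMP is identical.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                ResultSet rs = stmt.executeQuery(
                        "SELECT TO_TIMESTAMP('2015/02/09 18:37:14', 'yyyy/MM/dd HH:mm:ss'," +
                        " 'America/Los_Angeles') FROM SYSTEM.CATALOG LIMIT 1");
                rs.next();
                System.out.println(rs.getTimestamp(1)); // parsed in the given zone
            }
        }
    }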

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index b778a57..de9e323 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -128,6 +128,8 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     private PMetaData metaData;
     private final PName tenantId;
     private final String datePattern;
+    private final String timePattern;
+    private final String timestampPattern;
     
     private boolean isClosed = false;
     private Sampler<?> sampler;
@@ -204,15 +206,19 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         this.tenantId = tenantId;
         this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info, this.services.getProps());
         datePattern = this.services.getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
+        timePattern = this.services.getProps().get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
+        timestampPattern = this.services.getProps().get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT);
         String numberPattern = this.services.getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
         int maxSize = this.services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
-        Format dateTimeFormat = DateUtil.getDateFormatter(datePattern);
-        formatters.put(PDate.INSTANCE, dateTimeFormat);
-        formatters.put(PTime.INSTANCE, dateTimeFormat);
-        formatters.put(PTimestamp.INSTANCE, dateTimeFormat);
-        formatters.put(PUnsignedDate.INSTANCE, dateTimeFormat);
-        formatters.put(PUnsignedTime.INSTANCE, dateTimeFormat);
-        formatters.put(PUnsignedTimestamp.INSTANCE, dateTimeFormat);
+        Format dateFormat = DateUtil.getDateFormatter(datePattern);
+        Format timeFormat = DateUtil.getDateFormatter(timePattern);
+        Format timestampFormat = DateUtil.getDateFormatter(timestampPattern);
+        formatters.put(PDate.INSTANCE, dateFormat);
+        formatters.put(PTime.INSTANCE, timeFormat);
+        formatters.put(PTimestamp.INSTANCE, timestampFormat);
+        formatters.put(PUnsignedDate.INSTANCE, dateFormat);
+        formatters.put(PUnsignedTime.INSTANCE, timeFormat);
+        formatters.put(PUnsignedTimestamp.INSTANCE, timestampFormat);
         formatters.put(PDecimal.INSTANCE, FunctionArgumentType.NUMERIC.getFormatter(numberPattern));
         // We do not limit the metaData on a connection less than the global one,
         // as there's not much that will be cached here.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java
index 6140dbc..fd4d16a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java
@@ -18,16 +18,13 @@
 package org.apache.phoenix.parse;
 
 import java.sql.SQLException;
-import java.text.Format;
 import java.util.List;
-import java.util.TimeZone;
 
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.function.FunctionExpression;
 import org.apache.phoenix.expression.function.ToDateFunction;
-import org.apache.phoenix.util.DateUtil;
 
 
 public class ToDateParseNode extends FunctionParseNode {
@@ -46,7 +43,6 @@ public class ToDateParseNode extends FunctionParseNode {
         if (timeZoneId == null) {
             timeZoneId = context.getDateFormatTimeZone().getID();
         }
-        DateUtil.DateTimeParser dateParser = DateUtil.getDateParser(dateFormat, timeZoneId);
-        return new ToDateFunction(children, dateFormat, dateParser);
+        return new ToDateFunction(children, dateFormat, timeZoneId);
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/parse/ToTimeParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ToTimeParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ToTimeParseNode.java
new file mode 100644
index 0000000..5f0a72d
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ToTimeParseNode.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.function.FunctionExpression;
+import org.apache.phoenix.expression.function.ToTimeFunction;
+
+
+public class ToTimeParseNode extends FunctionParseNode { 
+
+    public ToTimeParseNode(String name, List<ParseNode> children, BuiltInFunctionInfo info) {
+        super(name, children, info);
+    }
+
+    @Override
+    public FunctionExpression create(List<Expression> children, StatementContext context) throws SQLException {
+        String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue();
+        String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue();
+        if (dateFormat == null) {
+            dateFormat = context.getTimeFormat();
+        }
+        if (timeZoneId == null) {
+            timeZoneId = context.getDateFormatTimeZone().getID();
+        }
+        return new ToTimeFunction(children, dateFormat, timeZoneId);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/parse/ToTimestampParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ToTimestampParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ToTimestampParseNode.java
new file mode 100644
index 0000000..2a3f5ec
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ToTimestampParseNode.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.function.FunctionExpression;
+import org.apache.phoenix.expression.function.ToTimestampFunction;
+
+
+public class ToTimestampParseNode extends FunctionParseNode { 
+
+    public ToTimestampParseNode(String name, List<ParseNode> children, BuiltInFunctionInfo info) {
+        super(name, children, info);
+    }
+
+    @Override
+    public FunctionExpression create(List<Expression> children, StatementContext context) throws SQLException {
+        String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue();
+        String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue();
+        if (dateFormat == null) {
+            dateFormat = context.getTimestampFormat();
+        }
+        if (timeZoneId == null) {
+            timeZoneId = context.getDateFormatTimeZone().getID();
+        }
+        return new ToTimestampFunction(children, dateFormat, timeZoneId);
+    }
+}
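
Both parse nodes hand a resolved format and time-zone id to the function. A standalone sketch of the time-zone fallback the functions rely on, mirroring DateUtil.getTimeZone in the DateUtil changes further down; the printed local zone is client-dependent:

    import java.util.TimeZone;

    public class LocalZoneSketch {
        // Mirrors DateUtil.getTimeZone(): null -> GMT, "LOCAL" -> client default,
        // anything else -> looked up by id. The resolved id (not "LOCAL") is what
        // ToDateFunction serializes, so servers never apply *their* local zone.
        static TimeZone resolve(String timeZoneId) {
            if (timeZoneId == null) return TimeZone.getTimeZone("GMT");
            if ("LOCAL".equalsIgnoreCase(timeZoneId)) return TimeZone.getDefault();
            return TimeZone.getTimeZone(timeZoneId);
        }

        public static void main(String[] args) {
            System.out.println(resolve(null).getID());    // GMT
            System.out.println(resolve("LOCAL").getID()); // e.g. America/Los_Angeles
        }
    }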

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index d21695d..e20d5ee 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -71,6 +71,9 @@ public interface QueryServices extends SQLCloseable {
     public static final String MAX_SERVER_CACHE_SIZE_ATTRIB = "phoenix.query.maxServerCacheBytes";
     public static final String DATE_FORMAT_TIMEZONE_ATTRIB = "phoenix.query.dateFormatTimeZone";
     public static final String DATE_FORMAT_ATTRIB = "phoenix.query.dateFormat";
+    public static final String TIME_FORMAT_ATTRIB = "phoenix.query.timeFormat";
+    public static final String TIMESTAMP_FORMAT_ATTRIB = "phoenix.query.timestampFormat";
+
     public static final String NUMBER_FORMAT_ATTRIB = "phoenix.query.numberFormat";
     public static final String CALL_QUEUE_ROUND_ROBIN_ATTRIB = "ipc.server.callqueue.roundrobin";
     public static final String SCAN_CACHE_SIZE_ATTRIB = "hbase.client.scanner.caching";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
index 13a828f..bbd0a35 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
@@ -17,16 +17,16 @@
  */
 package org.apache.phoenix.schema.types;
 
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.util.DateUtil;
-
 import java.math.BigDecimal;
 import java.sql.Date;
 import java.sql.Types;
 import java.text.Format;
 
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.util.DateUtil;
+
 public class PDate extends PDataType<Date> {
 
   public static final PDate INSTANCE = new PDate();
@@ -71,7 +71,7 @@ public class PDate extends PDataType<Date> {
     } else if (actualType == PDecimal.INSTANCE) {
       return new Date(((BigDecimal) object).longValueExact());
     } else if (actualType == PVarchar.INSTANCE) {
-      return DateUtil.parseDateTime((String) object);
+      return DateUtil.parseDate((String) object);
     }
     return throwConstraintViolationException(actualType, this);
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
index d824885..81cbaff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
@@ -17,15 +17,15 @@
  */
 package org.apache.phoenix.schema.types;
 
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.util.DateUtil;
-
 import java.math.BigDecimal;
 import java.sql.Time;
 import java.sql.Types;
 import java.text.Format;
 
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.util.DateUtil;
+
 public class PTime extends PDataType<Time> {
 
   public static final PTime INSTANCE = new PTime();
@@ -78,7 +78,7 @@ public class PTime extends PDataType<Time> {
     } else if (actualType == PDecimal.INSTANCE) {
       return new java.sql.Time(((BigDecimal) object).longValueExact());
     } else if (actualType == PVarchar.INSTANCE) {
-      return DateUtil.parseDateTime((String) object);
+      return DateUtil.parseTime((String) object);
     }
     return throwConstraintViolationException(actualType, this);
   }
@@ -128,8 +128,10 @@ public class PTime extends PDataType<Time> {
 
   @Override
   public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
-    // TODO: different default formatter for TIME?
-    return PDate.INSTANCE.toStringLiteral(b, offset, length, formatter);
+      if (formatter == null) {
+          formatter = DateUtil.DEFAULT_TIME_FORMATTER;
+      }
+      return "'" + super.toStringLiteral(b, offset, length, formatter) + "'";
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
index 4bdcb86..8182e33 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
@@ -17,17 +17,17 @@
  */
 package org.apache.phoenix.schema.types;
 
+import java.math.BigDecimal;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.text.Format;
+
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.DateUtil;
 
-import java.math.BigDecimal;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.text.Format;
-
 public class PTimestamp extends PDataType<Timestamp> {
 
   public static final PTimestamp INSTANCE = new PTimestamp();
@@ -84,7 +84,7 @@ public class PTimestamp extends PDataType<Timestamp> {
               .intValue();
       return DateUtil.getTimestamp(ms, nanos);
     } else if (actualType == PVarchar.INSTANCE) {
-      return new Timestamp(DateUtil.parseDateTime((String) object).getTime());
+      return DateUtil.parseTimestamp((String) object);
     }
     return throwConstraintViolationException(actualType, this);
   }
@@ -181,15 +181,13 @@ public class PTimestamp extends PDataType<Timestamp> {
 
   @Override
   public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
-    java.sql.Timestamp value = (java.sql.Timestamp) toObject(b, offset, length);
-    if (formatter == null || formatter == DateUtil.DEFAULT_DATE_FORMATTER) {
-      // If default formatter has not been overridden,
-      // use one that displays milliseconds.
-      formatter = DateUtil.DEFAULT_MS_DATE_FORMATTER;
-    }
-    return "'" + super.toStringLiteral(b, offset, length, formatter) + "." + value.getNanos() + "'";
+      if (formatter == null) {
+          formatter = DateUtil.DEFAULT_TIMESTAMP_FORMATTER;
+      }
+      return "'" + super.toStringLiteral(b, offset, length, formatter) + "'";
   }
 
+
   @Override
   public int getNanos(ImmutableBytesWritable ptr, SortOrder sortOrder) {
     int nanos = PUnsignedInt.INSTANCE.getCodec()
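
The PVarchar coercions above now delegate to a type-specific DateUtil.parse* method. A plain-JDK sketch of the shared behavior those methods converge on, assuming the GMT default zone; SimpleDateFormat stands in for the parsers DateUtil actually builds:

    import java.sql.Date;
    import java.sql.Time;
    import java.sql.Timestamp;
    import java.text.SimpleDateFormat;
    import java.util.TimeZone;

    public class ParseTrioSketch {
        public static void main(String[] args) throws Exception {
            // GMT is Phoenix's default parse zone (DateUtil.DEFAULT_TIME_ZONE_ID).
            SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
            fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
            long epoch = fmt.parse("1970-01-01 00:00:00.000").getTime(); // 0L
            // parseDate/parseTime/parseTimestamp now share one long epoch value
            // and differ only in the java.sql wrapper they return:
            System.out.println(new Date(epoch));
            System.out.println(new Time(epoch));
            System.out.println(new Timestamp(epoch));
            // PTime/PTimestamp.toStringLiteral correspondingly render quoted
            // literals with the millisecond pattern, e.g. '1970-01-01 00:00:00.000'.
        }
    }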

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
index fbc74ba..0f4b54a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
@@ -21,85 +21,122 @@ import java.math.BigDecimal;
 import java.sql.Date;
 import java.sql.Time;
 import java.sql.Timestamp;
+import java.sql.Types;
 import java.text.Format;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
+import java.util.List;
 import java.util.TimeZone;
 
 import org.apache.commons.lang.time.FastDateFormat;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.IllegalDataException;
+import org.apache.phoenix.schema.types.PDataType;
+import org.joda.time.DateTimeZone;
 import org.joda.time.chrono.ISOChronology;
 import org.joda.time.format.DateTimeFormatter;
 import org.joda.time.format.DateTimeFormatterBuilder;
 import org.joda.time.format.ISODateTimeFormat;
 
+import com.google.common.collect.Lists;
 
-@SuppressWarnings("serial")
+
+@SuppressWarnings({ "serial", "deprecation" })
 public class DateUtil {
     public static final String DEFAULT_TIME_ZONE_ID = "GMT";
+    public static final String LOCAL_TIME_ZONE_ID = "LOCAL";
     private static final TimeZone DEFAULT_TIME_ZONE = TimeZone.getTimeZone(DEFAULT_TIME_ZONE_ID);
-    public static final String DEFAULT_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss"; // This is the format the app sets in NLS settings for every connection.
-    public static final Format DEFAULT_DATE_FORMATTER = FastDateFormat.getInstance(
-            DEFAULT_DATE_FORMAT, TimeZone.getTimeZone(DEFAULT_TIME_ZONE_ID));
-
+    
     public static final String DEFAULT_MS_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss.SSS";
     public static final Format DEFAULT_MS_DATE_FORMATTER = FastDateFormat.getInstance(
             DEFAULT_MS_DATE_FORMAT, TimeZone.getTimeZone(DEFAULT_TIME_ZONE_ID));
 
-    private static final DateTimeFormatter ISO_DATE_TIME_PARSER = new DateTimeFormatterBuilder()
-            .append(ISODateTimeFormat.dateParser())
-            .appendOptional(new DateTimeFormatterBuilder()
-                    .appendLiteral(' ').toParser())
-            .appendOptional(new DateTimeFormatterBuilder()
-                    .append(ISODateTimeFormat.timeParser()).toParser())
-            .toFormatter()
-            .withZoneUTC()
-            .withChronology(ISOChronology.getInstanceUTC());
+    public static final String DEFAULT_DATE_FORMAT = DEFAULT_MS_DATE_FORMAT;
+    public static final Format DEFAULT_DATE_FORMATTER = DEFAULT_MS_DATE_FORMATTER;
 
-    private DateUtil() {
-    }
+    public static final String DEFAULT_TIME_FORMAT = DEFAULT_MS_DATE_FORMAT;
+    public static final Format DEFAULT_TIME_FORMATTER = DEFAULT_MS_DATE_FORMATTER;
 
-    public static DateTimeParser getDateParser(String pattern, TimeZone timeZone) {
-        if(DateUtil.DEFAULT_DATE_FORMAT.equals(pattern) &&
-                timeZone.getID().equalsIgnoreCase(DateUtil.DEFAULT_TIME_ZONE_ID)) {
-            return ISODateFormatParser.getInstance();
-        } else {
-            return new SimpleDateFormatParser(pattern, timeZone);
-        }
+    public static final String DEFAULT_TIMESTAMP_FORMAT = DEFAULT_MS_DATE_FORMAT;
+    public static final Format DEFAULT_TIMESTAMP_FORMATTER = DEFAULT_MS_DATE_FORMATTER;
+
+    private static final DateTimeFormatter ISO_DATE_TIME_FORMATTER = new DateTimeFormatterBuilder()
+        .append(ISODateTimeFormat.dateParser())
+        .appendOptional(new DateTimeFormatterBuilder()
+                .appendLiteral(' ').toParser())
+        .appendOptional(new DateTimeFormatterBuilder()
+                .append(ISODateTimeFormat.timeParser()).toParser())
+        .toFormatter().withChronology(ISOChronology.getInstanceUTC());
+    
+    private DateUtil() {
     }
 
-    public static DateTimeParser getDateParser(String pattern, String timeZoneId) {
-        if(timeZoneId == null) {
-            timeZoneId = DateUtil.DEFAULT_TIME_ZONE_ID;
-        }
+    private static TimeZone getTimeZone(String timeZoneId) {
         TimeZone parserTimeZone;
-        if ("LOCAL".equalsIgnoreCase(timeZoneId)) {
+        if (timeZoneId == null) {
+            parserTimeZone = DateUtil.DEFAULT_TIME_ZONE;
+        } else if (LOCAL_TIME_ZONE_ID.equalsIgnoreCase(timeZoneId)) {
             parserTimeZone = TimeZone.getDefault();
         } else {
             parserTimeZone = TimeZone.getTimeZone(timeZoneId);
         }
-        return getDateParser(pattern, parserTimeZone);
+        return parserTimeZone;
     }
-
-    public static DateTimeParser getDateParser(String pattern) {
-        return getDateParser(pattern, DEFAULT_TIME_ZONE);
-    }
-
-    public static DateTimeParser getTimeParser(String pattern, TimeZone timeZone) {
-        return getDateParser(pattern, timeZone);
+    
+    private static String[] defaultPattern;
+    static {
+        int maxOrdinal = Integer.MIN_VALUE;
+        List<PDataType> timeDataTypes = Lists.newArrayListWithExpectedSize(6);
+        for (PDataType type : PDataType.values()) {
+            if (java.util.Date.class.isAssignableFrom(type.getJavaClass())) {
+                timeDataTypes.add(type);
+                if (type.ordinal() > maxOrdinal) {
+                    maxOrdinal = type.ordinal();
+                }
+            }
+        }
+        defaultPattern = new String[maxOrdinal+1];
+        for (PDataType type : timeDataTypes) {
+            switch (type.getResultSetSqlType()) {
+            case Types.TIMESTAMP:
+                defaultPattern[type.ordinal()] = DateUtil.DEFAULT_TIMESTAMP_FORMAT;
+                break;
+            case Types.TIME:
+                defaultPattern[type.ordinal()] = DateUtil.DEFAULT_TIME_FORMAT;
+                break;
+            case Types.DATE:
+                defaultPattern[type.ordinal()] = DateUtil.DEFAULT_DATE_FORMAT;
+                break;
+            }
+        }
     }
-
-    public static DateTimeParser getTimeParser(String pattern) {
-        return getTimeParser(pattern, DEFAULT_TIME_ZONE);
+    
+    private static String getDefaultFormat(PDataType type) {
+        int ordinal = type.ordinal();
+        if (ordinal >= 0 && ordinal < defaultPattern.length) {
+            String format = defaultPattern[ordinal];
+            if (format != null) {
+                return format;
+            }
+        }
+        throw new IllegalArgumentException("Expected a date/time type, but got " + type);
     }
 
-    public static DateTimeParser getTimestampParser(String pattern, TimeZone timeZone) {
-        return getDateParser(pattern, timeZone);
+    public static DateTimeParser getDateTimeParser(String pattern, PDataType pDataType, String timeZoneId) {
+        TimeZone timeZone = getTimeZone(timeZoneId);
+        String defaultPattern = getDefaultFormat(pDataType);
+        if (pattern == null || pattern.length() == 0) {
+            pattern = defaultPattern;
+        }
+        if(defaultPattern.equals(pattern)) {
+            return ISODateFormatParserFactory.getParser(timeZone);
+        } else {
+            return new SimpleDateFormatParser(pattern, timeZone);
+        }
     }
 
-    public static DateTimeParser getTimestampParser(String pattern) {
-        return getTimestampParser(pattern, DEFAULT_TIME_ZONE);
+    public static DateTimeParser getDateTimeParser(String pattern, PDataType pDataType) {
+        return getDateTimeParser(pattern, pDataType, null);
     }
 
     public static Format getDateFormatter(String pattern) {
@@ -108,20 +145,32 @@ public class DateUtil {
                 : FastDateFormat.getInstance(pattern, DateUtil.DEFAULT_TIME_ZONE);
     }
 
-    public static Date parseDateTime(String dateTimeValue) {
+    public static Format getTimeFormatter(String pattern) {
+        return DateUtil.DEFAULT_TIME_FORMAT.equals(pattern)
+                ? DateUtil.DEFAULT_TIME_FORMATTER
+                : FastDateFormat.getInstance(pattern, DateUtil.DEFAULT_TIME_ZONE);
+    }
+
+    public static Format getTimestampFormatter(String pattern) {
+        return DateUtil.DEFAULT_TIMESTAMP_FORMAT.equals(pattern)
+                ? DateUtil.DEFAULT_TIMESTAMP_FORMATTER
+                : FastDateFormat.getInstance(pattern, DateUtil.DEFAULT_TIME_ZONE);
+    }
+
+    private static long parseDateTime(String dateTimeValue) {
         return ISODateFormatParser.getInstance().parseDateTime(dateTimeValue);
     }
 
     public static Date parseDate(String dateValue) {
-        return parseDateTime(dateValue);
+        return new Date(parseDateTime(dateValue));
     }
 
     public static Time parseTime(String timeValue) {
-        return new Time(parseDateTime(timeValue).getTime());
+        return new Time(parseDateTime(timeValue));
     }
 
     public static Timestamp parseTimestamp(String timestampValue) {
-        return new Timestamp(parseDateTime(timestampValue).getTime());
+        return new Timestamp(parseDateTime(timestampValue));
     }
 
     /**
@@ -145,7 +194,8 @@ public class DateUtil {
     }
 
     public static interface DateTimeParser {
-        public Date parseDateTime(String dateTimeString) throws IllegalDataException;
+        public long parseDateTime(String dateTimeString) throws IllegalDataException;
+        public TimeZone getTimeZone();
     }
 
     /**
@@ -168,41 +218,76 @@ public class DateUtil {
         }
 
         @Override
-        public Date parseDateTime(String dateTimeString) throws IllegalDataException {
+        public long parseDateTime(String dateTimeString) throws IllegalDataException {
             try {
                 java.util.Date date = parser.parse(dateTimeString);
-                return new java.sql.Date(date.getTime());
+                return date.getTime();
             } catch (ParseException e) {
-                throw new IllegalDataException("to_date('" + dateTimeString + "') did not match expected date format of '" + datePattern + "'.");
+                throw new IllegalDataException("Unable to parse date/time '" + dateTimeString + "' using format string of '" + datePattern + "'.");
             }
         }
+
+        @Override
+        public TimeZone getTimeZone() {
+            return parser.getTimeZone();
+        }
     }
 
+    private static class ISODateFormatParserFactory {
+        private ISODateFormatParserFactory() {}
+        
+        public static DateTimeParser getParser(final TimeZone timeZone) {
+            // If timeZone matches default, get singleton DateTimeParser
+            if (timeZone.equals(DEFAULT_TIME_ZONE)) {
+                return ISODateFormatParser.getInstance();
+            }
+            // Otherwise, create new DateTimeParser
+            return new DateTimeParser() {
+                private final DateTimeFormatter formatter = ISO_DATE_TIME_FORMATTER
+                        .withZone(DateTimeZone.forTimeZone(timeZone));
+
+                @Override
+                public long parseDateTime(String dateTimeString) throws IllegalDataException {
+                    try {
+                        return formatter.parseDateTime(dateTimeString).getMillis();
+                    } catch(IllegalArgumentException ex) {
+                        throw new IllegalDataException(ex);
+                    }
+                }
+
+                @Override
+                public TimeZone getTimeZone() {
+                    return timeZone;
+                }
+            };
+        }
+    }
     /**
      * This class is our default DateTime string parser
      */
     private static class ISODateFormatParser implements DateTimeParser {
-        private static ISODateFormatParser inst = null;
-        private static Object lock = new Object();
-        private ISODateFormatParser() {}
+        private static final ISODateFormatParser INSTANCE = new ISODateFormatParser();
 
         public static ISODateFormatParser getInstance() {
-            if(inst != null) return inst;
-
-            synchronized (lock) {
-                if (inst == null) {
-                    inst = new ISODateFormatParser();
-                }
-            }
-            return inst;
+            return INSTANCE;
         }
 
-        public Date parseDateTime(String dateTimeString) throws IllegalDataException {
+        private final DateTimeFormatter formatter = ISO_DATE_TIME_FORMATTER.withZoneUTC();
+
+        private ISODateFormatParser() {}
+
+        @Override
+        public long parseDateTime(String dateTimeString) throws IllegalDataException {
             try {
-                return new Date(ISO_DATE_TIME_PARSER.parseDateTime(dateTimeString).getMillis());
+                return formatter.parseDateTime(dateTimeString).getMillis();
             } catch(IllegalArgumentException ex) {
                 throw new IllegalDataException(ex);
             }
         }
+
+        @Override
+        public TimeZone getTimeZone() {
+            return formatter.getZone().toTimeZone();
+        }
     }
 }
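
For readers following the API change: the old type-specific getDateParser()/getTimeParser()/getTimestampParser() methods collapse into a single getDateTimeParser() keyed by PDataType, and parseDateTime() now returns epoch millis instead of java.sql.Date. A minimal usage sketch against the signatures shown above; the pattern strings and literal values here are illustrative, not from the patch:

    import java.sql.Date;
    import java.util.TimeZone;

    import org.apache.phoenix.schema.types.PDate;
    import org.apache.phoenix.util.DateUtil;
    import org.apache.phoenix.util.DateUtil.DateTimeParser;

    public class DateUtilParserSketch {
        public static void main(String[] args) {
            // A null/empty pattern falls back to the type's default format
            // and takes the shared ISO parser path.
            DateTimeParser isoParser = DateUtil.getDateTimeParser(null, PDate.INSTANCE);
            long epochMillis = isoParser.parseDateTime("1970-01-02");

            // A non-default pattern plus an explicit time zone id goes
            // through SimpleDateFormatParser instead.
            DateTimeParser customParser = DateUtil.getDateTimeParser(
                    "yyyy/MM/dd", PDate.INSTANCE, TimeZone.getTimeZone("GMT+1").getID());
            Date date = new Date(customParser.parseDateTime("1970/01/01"));
            System.out.println(epochMillis + " -> " + date);
        }
    }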

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index 731a13f..b5f6f9f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -17,16 +17,22 @@
  */
 package org.apache.phoenix.util.csv;
 
-import com.google.common.base.Function;
-import com.google.common.collect.Lists;
+import java.io.Closeable;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.List;
+import java.util.Properties;
+
+import javax.annotation.Nullable;
+
 import org.apache.commons.csv.CSVRecord;
-import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PDate;
-import org.apache.phoenix.schema.types.PTime;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.DateUtil;
@@ -34,16 +40,8 @@ import org.apache.phoenix.util.QueryUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.annotation.Nullable;
-import java.io.Closeable;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.Date;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.List;
-import java.util.Properties;
-import java.util.TimeZone;
+import com.google.common.base.Function;
+import com.google.common.collect.Lists;
 
 /**
  * Executes upsert statements on a provided {@code PreparedStatement} based on incoming CSV records, notifying a
@@ -205,12 +203,23 @@ public class CsvUpsertExecutor implements Closeable {
                 throw new RuntimeException(e);
             }
             this.dataType = dataType;
-            if(dataType.equals(PDate.INSTANCE) || dataType.equals(PTime.INSTANCE) || dataType.equals(PTimestamp.INSTANCE)) {
-                String dateFormat = props.getProperty(QueryServices.DATE_FORMAT_ATTRIB,
-                        QueryServicesOptions.DEFAULT_DATE_FORMAT);
+            if(dataType.isCoercibleTo(PTimestamp.INSTANCE)) {
+                // TODO: move to DateUtil
+                String dateFormat;
+                int dateSqlType = dataType.getResultSetSqlType();
+                if (dateSqlType == Types.DATE) {
+                    dateFormat = props.getProperty(QueryServices.DATE_FORMAT_ATTRIB,
+                            DateUtil.DEFAULT_DATE_FORMAT);
+                } else if (dateSqlType == Types.TIME) {
+                    dateFormat = props.getProperty(QueryServices.TIME_FORMAT_ATTRIB,
+                            DateUtil.DEFAULT_TIME_FORMAT);
+                } else {
+                    dateFormat = props.getProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
+                            DateUtil.DEFAULT_TIMESTAMP_FORMAT);                    
+                }
                 String timeZoneId = props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
                         QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
-                this.dateTimeParser = DateUtil.getDateParser(dateFormat, timeZoneId);
+                this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId);
             } else {
                 this.dateTimeParser = null;
             }
@@ -220,7 +229,10 @@ public class CsvUpsertExecutor implements Closeable {
         @Override
         public Object apply(@Nullable String input) {
             if(dateTimeParser != null) {
-                return dateTimeParser.parseDateTime(input);
+                long epochTime = dateTimeParser.parseDateTime(input);
+                byte[] byteValue = new byte[dataType.getByteSize()];
+                dataType.getCodec().encodeLong(epochTime, byteValue, 0);
+                return dataType.toObject(byteValue);
             }
             return dataType.toObject(input);
         }
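
The upshot for CSV loads: DATE, TIME and TIMESTAMP columns each consult their own format property before falling back to the DateUtil defaults, and parsed values are round-tripped through the type's codec. A sketch of setting these properties on the connection used for upserts; the pattern values and connection handling are illustrative assumptions, while the property keys are the ones referenced in the diff above:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import org.apache.phoenix.query.QueryServices;

    public class CsvDateFormatPropsSketch {
        public static Connection open(String url) throws Exception {
            Properties props = new Properties();
            // Per-type format properties consulted by the CSV conversion code above
            props.setProperty(QueryServices.DATE_FORMAT_ATTRIB, "yyyy-MM-dd");
            props.setProperty(QueryServices.TIME_FORMAT_ATTRIB, "HH:mm:ss");
            props.setProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB, "yyyy-MM-dd HH:mm:ss.SSS");
            props.setProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, "UTC");
            return DriverManager.getConnection(url, props);
        }
    }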

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
index 6dbd303..69c1bbf 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
@@ -37,9 +37,9 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.math.BigDecimal;
+import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.SQLException;
-import java.text.Format;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -60,11 +60,11 @@ import org.apache.phoenix.query.BaseConnectionlessQueryTest;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.RowKeyValueAccessor;
+import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.schema.RowKeyValueAccessor;
-import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.NumberUtil;
@@ -277,7 +277,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
 
-        Object date = DateUtil.parseDateTime(dateStr);
+        Date date = DateUtil.parseDate(dateStr);
 
         assertEquals(
             singleKVFilter(constantComparison(

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java
index f75bb3e..8fb1a6c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java
@@ -30,22 +30,6 @@ import java.util.TimeZone;
 
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.schema.types.PDecimal;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PDate;
-import org.apache.phoenix.schema.types.PDouble;
-import org.apache.phoenix.schema.types.PFloat;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.schema.types.PLong;
-import org.apache.phoenix.schema.types.PUnsignedDouble;
-import org.apache.phoenix.schema.types.PUnsignedFloat;
-import org.apache.phoenix.schema.types.PUnsignedInt;
-import org.apache.phoenix.schema.types.PUnsignedLong;
-import org.apache.phoenix.schema.types.PVarchar;
-import org.junit.Test;
-
-import com.google.common.collect.Lists;
 import org.apache.phoenix.expression.function.FunctionArgumentType;
 import org.apache.phoenix.expression.function.LTrimFunction;
 import org.apache.phoenix.expression.function.LengthFunction;
@@ -63,8 +47,24 @@ import org.apache.phoenix.expression.function.ToNumberFunction;
 import org.apache.phoenix.expression.function.TrimFunction;
 import org.apache.phoenix.expression.function.UpperFunction;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PFloat;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PUnsignedDouble;
+import org.apache.phoenix.schema.types.PUnsignedFloat;
+import org.apache.phoenix.schema.types.PUnsignedInt;
+import org.apache.phoenix.schema.types.PUnsignedLong;
+import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.DateUtil;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
 
 /**
  * @since 1.2
@@ -148,7 +148,7 @@ public class SortOrderExpressionTest {
     @Test
     public void toDate() throws Exception {
         List<Expression> args = Lists.newArrayList(getInvertedLiteral("2001-11-30 00:00:00:0", PVarchar.INSTANCE));
-        evaluateAndAssertResult(new ToDateFunction(args, null, DateUtil.getDateParser("yyyy-MM-dd HH:mm:ss:S")), date(11, 30, 2001));
+        evaluateAndAssertResult(new ToDateFunction(args, "yyyy-MM-dd HH:mm:ss:S", DateUtil.DEFAULT_TIME_ZONE_ID), date(11, 30, 2001));
     }
     
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11a76b29/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
index 702e556..ec0bc01 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
@@ -28,6 +28,9 @@ import java.text.ParseException;
 import java.util.TimeZone;
 
 import org.apache.phoenix.schema.IllegalDataException;
+import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PTime;
+import org.apache.phoenix.schema.types.PTimestamp;
 import org.junit.Test;
 
 /**
@@ -68,64 +71,64 @@ public class DateUtilTest {
 
     @Test
     public void testGetDateParser_DefaultTimeZone() throws ParseException {
-        Date date = DateUtil.getDateParser("yyyy-MM-dd").parseDateTime("1970-01-01");
+        Date date = new Date(DateUtil.getDateTimeParser("yyyy-MM-dd", PDate.INSTANCE).parseDateTime("1970-01-01"));
         assertEquals(0, date.getTime());
     }
 
     @Test
     public void testGetDateParser_CustomTimeZone() throws ParseException {
-        Date date = DateUtil.getDateParser(
-                "yyyy-MM-dd", TimeZone.getTimeZone("GMT+1")).parseDateTime("1970-01-01");
+        Date date = new Date(DateUtil.getDateTimeParser(
+                "yyyy-MM-dd", PDate.INSTANCE, TimeZone.getTimeZone("GMT+1").getID()).parseDateTime("1970-01-01"));
         assertEquals(-ONE_HOUR_IN_MILLIS, date.getTime());
     }
 
     @Test
     public void testGetDateParser_LocalTimeZone() throws ParseException {
-        Date date = DateUtil.getDateParser(
-                "yyyy-MM-dd", TimeZone.getDefault()).parseDateTime("1970-01-01");
+        Date date = new Date(DateUtil.getDateTimeParser(
+                "yyyy-MM-dd", PDate.INSTANCE, TimeZone.getDefault().getID()).parseDateTime("1970-01-01"));
         assertEquals(Date.valueOf("1970-01-01"), date);
     }
 
     @Test
     public void testGetTimestampParser_DefaultTimeZone() throws ParseException {
-        Timestamp ts = new Timestamp(DateUtil.getTimestampParser("yyyy-MM-dd HH:mm:ss")
-                .parseDateTime("1970-01-01 00:00:00").getTime());
+        Timestamp ts = new Timestamp(DateUtil.getDateTimeParser("yyyy-MM-dd HH:mm:ss", PTimestamp.INSTANCE)
+                .parseDateTime("1970-01-01 00:00:00"));
         assertEquals(0, ts.getTime());
     }
 
     @Test
     public void testGetTimestampParser_CustomTimeZone() throws ParseException {
-        Timestamp ts = new Timestamp(DateUtil.getTimestampParser("yyyy-MM-dd HH:mm:ss", TimeZone.getTimeZone("GMT+1"))
-                .parseDateTime("1970-01-01 00:00:00").getTime());
+        Timestamp ts = new Timestamp(DateUtil.getDateTimeParser("yyyy-MM-dd HH:mm:ss", PTimestamp.INSTANCE, TimeZone.getTimeZone("GMT+1").getID())
+                .parseDateTime("1970-01-01 00:00:00"));
         assertEquals(-ONE_HOUR_IN_MILLIS, ts.getTime());
     }
 
     @Test
     public void testGetTimestampParser_LocalTimeZone() throws ParseException {
-        Timestamp ts = new Timestamp(DateUtil.getTimestampParser(
+        Timestamp ts = new Timestamp(DateUtil.getDateTimeParser(
                 "yyyy-MM-dd HH:mm:ss",
-                TimeZone.getDefault()).parseDateTime("1970-01-01 00:00:00").getTime());
+                PTimestamp.INSTANCE, TimeZone.getDefault().getID()).parseDateTime("1970-01-01 00:00:00"));
         assertEquals(Timestamp.valueOf("1970-01-01 00:00:00"), ts);
     }
 
     @Test
     public void testGetTimeParser_DefaultTimeZone() throws ParseException {
-        Time time = new Time(DateUtil.getTimeParser("HH:mm:ss").parseDateTime("00:00:00").getTime());
+        Time time = new Time(DateUtil.getDateTimeParser("HH:mm:ss", PTime.INSTANCE).parseDateTime("00:00:00"));
         assertEquals(0, time.getTime());
     }
 
     @Test
     public void testGetTimeParser_CustomTimeZone() throws ParseException {
-        Time time = new Time(DateUtil.getTimeParser(
+        Time time = new Time(DateUtil.getDateTimeParser(
                 "HH:mm:ss",
-                TimeZone.getTimeZone("GMT+1")).parseDateTime("00:00:00").getTime());
+                PTime.INSTANCE, TimeZone.getTimeZone("GMT+1").getID()).parseDateTime("00:00:00"));
         assertEquals(-ONE_HOUR_IN_MILLIS, time.getTime());
     }
 
     @Test
     public void testGetTimeParser_LocalTimeZone() throws ParseException {
-        Time time = new Time(DateUtil.getTimeParser(
-                "HH:mm:ss", TimeZone.getDefault()).parseDateTime("00:00:00").getTime());
+        Time time = new Time(DateUtil.getDateTimeParser(
+                "HH:mm:ss", PTime.INSTANCE, TimeZone.getDefault().getID()).parseDateTime("00:00:00"));
         assertEquals(Time.valueOf("00:00:00"), time);
     }
 


[45/50] [abbrv] phoenix git commit: PHOENIX-1690 IndexOutOfBoundsException during SkipScanFilter intersect

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/49f06b33/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java
index a2b6115..fea1d91 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java
@@ -26,13 +26,13 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PDatum;
-import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PChar;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarchar;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -92,7 +92,8 @@ public class SkipScanFilterIntersectTest {
     @Parameters(name="{0} {4}")
     public static Collection<Object> data() {
         List<Object> testCases = Lists.newArrayList();
-        // Causes increment of slot 2 to increment slot 1
+        // Both ranges in second slot are required b/c first slot contains range and upper/lower
+        // values differ in this slot position.
         testCases.addAll(foreach(
                 new KeyRange[][] {{
                     PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("e"), false),
@@ -109,10 +110,70 @@ public class SkipScanFilterIntersectTest {
                 new KeyRange[][] {{
                     PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false),
                     }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true),
                     PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true),
                     }, {
                     PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true),
                 }}));
+        // Only second range in second slot is required b/c though first slot contains range,
+        // upper/lower values do not differ in this slot position.
+        testCases.addAll(foreach(
+                new KeyRange[][] {{
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("e"), false),
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true),
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true),
+                }},
+                new int[] {1,1,1},
+                Bytes.toBytes("j3A"),
+                Bytes.toBytes("j4C"),
+                new KeyRange[][] {{
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true),
+                }}));
+        // Test case exercising repositioning multiple times (initially to slot #2 and then again
+        // to slot #4). Because there's a range for slot #4 and the lower/upper values are different,
+        // all slot #5 ranges are part of the intersection.
+        testCases.addAll(foreach(
+                new KeyRange[][] {{
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true),
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("d"), true, Bytes.toBytes("d"), true),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("u"), false),
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("z"), true, Bytes.toBytes("z"), true),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true),                        
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("D"), true),
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("M"), true, Bytes.toBytes("M"), true),                        
+                    }
+                },
+                new int[] {1,1,1,1,1},
+                Bytes.toBytes("bkCpM"),
+                Bytes.toBytes("bkCtD"),
+                new KeyRange[][] {{
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("u"), false),
+                    }, {
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true),
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("D"), true),
+                    PChar.INSTANCE.getKeyRange(Bytes.toBytes("M"), true, Bytes.toBytes("M"), true),
+                    }
+                }));
         // Single matching in the first 2 slots.
         testCases.addAll(foreach(
                 new KeyRange[][] {{


[13/50] [abbrv] phoenix git commit: PHOENIX-1644 Check for min HBase version before creating local index and provide means of disabling usage

Posted by ma...@apache.org.
PHOENIX-1644 Check for min HBase version before creating local index and provide means of disabling usage


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/54c4ed80
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/54c4ed80
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/54c4ed80

Branch: refs/heads/calcite
Commit: 54c4ed80869f6ac0011a65d1247285cfae0a6759
Parents: f3c675b
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Feb 6 14:43:04 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri Feb 6 14:59:01 2015 -0800

----------------------------------------------------------------------
 .../phoenix/end2end/DisableLocalIndexIT.java    | 99 ++++++++++++++++++++
 .../phoenix/exception/SQLExceptionCode.java     |  3 +
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |  5 +-
 .../query/ConnectionQueryServicesImpl.java      |  8 ++
 .../org/apache/phoenix/query/HTableFactory.java |  3 +-
 .../org/apache/phoenix/query/QueryServices.java |  1 +
 .../phoenix/query/QueryServicesOptions.java     |  1 +
 .../apache/phoenix/schema/MetaDataClient.java   | 13 ++-
 8 files changed, 127 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
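
Operationally, the new switch is phoenix.index.allowLocalIndex (QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, default true, per the diffs below). A minimal sketch of turning local indexes off programmatically; placing the same key in hbase-site.xml before server startup is the assumed deployment route:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class DisableLocalIndexConfigSketch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Same key as QueryServices.ALLOW_LOCAL_INDEX_ATTRIB; must be visible
            // to the server before it starts, as the IT below demonstrates.
            conf.setBoolean("phoenix.index.allowLocalIndex", false);
            return conf;
        }
    }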


http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c4ed80/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
new file mode 100644
index 0000000..5f18a1c
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class DisableLocalIndexIT extends BaseHBaseManagedTimeIT {
+    @BeforeClass
+    @Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
+    public static void doSetup() throws Exception {
+        Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
+        // Must update config before starting server
+        props.put(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, Boolean.FALSE.toString());
+        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    }
+
+    @Test
+    public void testDisabledLocalIndexes() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.setAutoCommit(true);
+        String tableName = "DISABLE_LOCAL_INDEX_TEST";
+        conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true");
+        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t1','x')");
+        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t2','y')");
+        HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+        assertFalse(admin.tableExists(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName)));
+        admin.close();
+        try {
+            HTableInterface t = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName));
+            t.getTableDescriptor(); // getTable no longer throws for a missing table, so force an RPC to surface the exception
+            fail("Local index table should not have been created");
+        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
+            //expected
+        } finally {
+            admin.close();
+        }
+        
+        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1");
+        Connection tsconn = DriverManager.getConnection(getUrl(), props);
+        
+        tsconn.createStatement().execute("CREATE VIEW A.BAR(V1 VARCHAR) AS SELECT * FROM " + tableName);
+        tsconn.createStatement().execute("CREATE INDEX I1 ON A.BAR(V1)");
+        tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
+
+        try {
+            conn.createStatement().execute("CREATE LOCAL INDEX I2 ON " + tableName + "(k2)");
+            fail("Should not allow creation of local index");
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES.getErrorCode(), e.getErrorCode());
+        }
+        try {
+            tsconn.createStatement().execute("CREATE LOCAL INDEX I2 ON A.BAR(k2, v1)");
+            fail("Should not allow creation of local index");
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES.getErrorCode(), e.getErrorCode());
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c4ed80/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 8a6b8d0..19e7cdf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -236,6 +236,9 @@ public enum SQLExceptionCode {
     CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED(1052, "43A09", "Property cannot be specified for a column family that is not being added or modified"),
     CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN(1053, "43A10", "Table level property cannot be set when adding a column"),
     
+    NO_LOCAL_INDEXES(1054, "43A11", "Local secondary indexes are only supported for HBase version " + MetaDataUtil.decodeHBaseVersionAsString(PhoenixDatabaseMetaData.LOCAL_SI_VERSION_THRESHOLD) + " and above."),
+    UNALLOWED_LOCAL_INDEXES(1055, "43A12", "Local secondary indexes are configured to not be allowed."),
+
     /** Sequence related */
     SEQUENCE_ALREADY_EXIST(1200, "42Z00", "Sequence already exists.", new Factory() {
         @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c4ed80/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index b26f408..7ac2bb6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -52,17 +52,17 @@ import org.apache.phoenix.iterate.DelegateResultIterator;
 import org.apache.phoenix.iterate.MaterializedResultIterator;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTableType;
-import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -274,6 +274,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final int ESSENTIAL_FAMILY_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "7");
     // Version below which we should disallow usage of mutable secondary indexing.
     public static final int MUTABLE_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "10");
+    public static final int LOCAL_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "98", "9");
     /** Version below which we fall back on the generic KeyValueBuilder */
     public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = VersionUtil.encodeVersion("0", "94", "14");
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c4ed80/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 97efc43..6d58f57 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1069,6 +1069,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     private void ensureLocalIndexTableCreated(byte[] physicalTableName, Map<String, Object> tableProps, List<Pair<byte[], Map<String, Object>>> families, byte[][] splits) throws SQLException, TableAlreadyExistsException {
+        
+        // If we're not allowing local indexes or the hbase version is too low,
+        // don't create the local index table
+        if (!this.getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)
+                || getLowestClusterHBaseVersion() < PhoenixDatabaseMetaData.LOCAL_SI_VERSION_THRESHOLD) {
+            return;
+        }
+        
         tableProps.put(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_NAME, TRUE_BYTES_AS_STRING);
         HTableDescriptor desc = ensureTableCreated(physicalTableName, PTableType.TABLE, tableProps, families, splits, true);
         if (desc != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c4ed80/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
index 447267c..7a10683 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.concurrent.ExecutorService;
 
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 
 /**
@@ -48,7 +47,7 @@ public interface HTableFactory {
     static class HTableFactoryImpl implements HTableFactory {
         @Override
         public HTableInterface getTable(byte[] tableName, HConnection connection, ExecutorService pool) throws IOException {
-            return new HTable(tableName, connection, pool);
+            return connection.getTable(tableName, pool);
         }
     }
 }
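
Side note on the HTableFactory change: HConnection.getTable() defers the table-existence check that the HTable constructor used to perform, which is exactly why DisableLocalIndexIT above has to force an RPC via getTableDescriptor(). A sketch of the new acquisition path, mirroring the one-line change above:

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;

    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HTableInterface;

    public class TableFromConnectionSketch {
        // Lets the HConnection manage the table's lifecycle instead of
        // constructing HTable directly; no RPC is issued until first use.
        public static HTableInterface open(HConnection connection, byte[] tableName,
                ExecutorService pool) throws IOException {
            return connection.getTable(tableName, pool);
        }
    }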

http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c4ed80/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index ce9016d..d21695d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -123,6 +123,7 @@ public interface QueryServices extends SQLCloseable {
     public static final String MIN_INDEX_PRIOIRTY_ATTRIB = "phoenix.regionserver.index.priority.min";
     public static final String MAX_INDEX_PRIOIRTY_ATTRIB = "phoenix.regionserver.index.priority.max";
     public static final String INDEX_HANDLER_COUNT_ATTRIB = "phoenix.regionserver.index.handler.count";
+    public static final String ALLOW_LOCAL_INDEX_ATTRIB = "phoenix.index.allowLocalIndex";
 
     // Config parameters for configuring tracing
     public static final String TRACING_FREQ_ATTRIB = "phoenix.trace.frequency";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c4ed80/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 5913796..0f9139f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -144,6 +144,7 @@ public class QueryServicesOptions {
      */
     public static final int DEFAULT_INDEX_MIN_PRIORITY = 1000;
     public static final int DEFAULT_INDEX_HANDLER_COUNT = 30;
+    public static final boolean DEFAULT_ALLOW_LOCAL_INDEX = true;
 
     public static final int DEFAULT_TRACING_PAGE_SIZE = 100;
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c4ed80/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 09d2f66..effdb54 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -909,6 +909,16 @@ public class MetaDataClient {
         boolean retry = true;
         Short indexId = null;
         boolean allocateIndexId = false;
+        boolean isLocalIndex = statement.getIndexType() == IndexType.LOCAL;
+        int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
+        if (isLocalIndex) {
+            if (!connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) {
+                throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
+            }
+            if (hbaseVersion < PhoenixDatabaseMetaData.LOCAL_SI_VERSION_THRESHOLD) {
+                throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
+            }
+        }
         while (true) {
             try {
                 ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
@@ -920,7 +930,6 @@ public class MetaDataClient {
                         throw new SQLFeatureNotSupportedException("An index may only be created for a VIEW through a tenant-specific connection");
                     }
                 }
-                int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
                 if (!dataTable.isImmutableRows()) {
                     if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
                         throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
@@ -960,7 +969,7 @@ public class MetaDataClient {
                  * 1) for a local index, as all local indexes will reside in the same HBase table
                  * 2) for a view on an index.
                  */
-                if (statement.getIndexType() == IndexType.LOCAL || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED)) {
+                if (isLocalIndex || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED)) {
                     allocateIndexId = true;
                     // Next add index ID column
                     PDataType dataType = MetaDataUtil.getViewIndexIdDataType();


[17/50] [abbrv] phoenix git commit: PHOENIX-1142 Fix compiler errors and warnings from original checkin

Posted by ma...@apache.org.
PHOENIX-1142 Fix compiler errors and warnings from original checkin


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/47ca5958
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/47ca5958
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/47ca5958

Branch: refs/heads/calcite
Commit: 47ca5958a2334bef35da0d9ff938e0420a79b321
Parents: eaa7fbf
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Feb 6 18:12:49 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri Feb 6 18:12:49 2015 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/phoenix/util/DateUtil.java  |  5 +----
 .../java/org/apache/phoenix/util/DateUtilTest.java   | 15 +++++++--------
 2 files changed, 8 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/47ca5958/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
index 659f45e..fbc74ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
@@ -155,10 +155,6 @@ public class DateUtil {
         private String datePattern;
         private SimpleDateFormat parser;
 
-        public SimpleDateFormatParser(String pattern) {
-            this(pattern, DEFAULT_TIME_ZONE);
-        }
-
         public SimpleDateFormatParser(String pattern, TimeZone timeZone) {
             datePattern = pattern;
             parser = new SimpleDateFormat(pattern) {
@@ -171,6 +167,7 @@ public class DateUtil {
             parser.setTimeZone(timeZone);
         }
 
+        @Override
         public Date parseDateTime(String dateTimeString) throws IllegalDataException {
             try {
                 java.util.Date date = parser.parse(dateTimeString);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/47ca5958/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
index 1cca156..702e556 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
@@ -17,6 +17,10 @@
  */
 package org.apache.phoenix.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.sql.Date;
 import java.sql.Time;
 import java.sql.Timestamp;
@@ -26,11 +30,6 @@ import java.util.TimeZone;
 import org.apache.phoenix.schema.IllegalDataException;
 import org.junit.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 /**
  * Test class for {@link DateUtil}
  *
@@ -69,20 +68,20 @@ public class DateUtilTest {
 
     @Test
     public void testGetDateParser_DefaultTimeZone() throws ParseException {
-        Date date = (Date) DateUtil.getDateParser("yyyy-MM-dd").parseDateTime("1970-01-01");
+        Date date = DateUtil.getDateParser("yyyy-MM-dd").parseDateTime("1970-01-01");
         assertEquals(0, date.getTime());
     }
 
     @Test
     public void testGetDateParser_CustomTimeZone() throws ParseException {
-        Date date = (Date) DateUtil.getDateParser(
+        Date date = DateUtil.getDateParser(
                 "yyyy-MM-dd", TimeZone.getTimeZone("GMT+1")).parseDateTime("1970-01-01");
         assertEquals(-ONE_HOUR_IN_MILLIS, date.getTime());
     }
 
     @Test
     public void testGetDateParser_LocalTimeZone() throws ParseException {
-        Date date = (Date) DateUtil.getDateParser(
+        Date date = DateUtil.getDateParser(
                 "yyyy-MM-dd", TimeZone.getDefault()).parseDateTime("1970-01-01");
         assertEquals(Date.valueOf("1970-01-01"), date);
     }


[02/50] [abbrv] phoenix git commit: PHOENIX-1610 Incorrect subquery results caused by unpaired contextStack push/pop

Posted by ma...@apache.org.
PHOENIX-1610 Incorrect subquery results caused by unpaired contextStack push/pop


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1c58f442
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1c58f442
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1c58f442

Branch: refs/heads/calcite
Commit: 1c58f442dea6c19745a119f53e47e78ce9e2ef76
Parents: d058a41
Author: maryannxue <we...@intel.com>
Authored: Thu Jan 29 11:42:24 2015 -0500
Committer: maryannxue <we...@intel.com>
Committed: Thu Jan 29 11:42:24 2015 -0500

----------------------------------------------------------------------
 .../apache/phoenix/end2end/DerivedTableIT.java  |  9 +++
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 52 ++++++++--------
 .../org/apache/phoenix/end2end/SubqueryIT.java  | 10 ++--
 .../end2end/SubqueryUsingSortMergeJoinIT.java   | 10 ++--
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |  6 +-
 .../apache/phoenix/compile/QueryCompiler.java   | 12 ++--
 .../apache/phoenix/execute/HashJoinPlan.java    | 63 ++++----------------
 .../phoenix/expression/InListExpression.java    |  9 +--
 .../apache/phoenix/join/HashCacheClient.java    | 58 +++++++++++++++---
 .../apache/phoenix/optimize/QueryOptimizer.java |  2 +-
 .../java/org/apache/phoenix/parse/HintNode.java |  8 +--
 11 files changed, 120 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
index 727293f..7a418bd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
@@ -667,6 +667,15 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
             assertEquals(2,rs.getInt(1));
 
             assertFalse(rs.next());
+            
+            // count (subquery)
+            query = "SELECT count(*) FROM (SELECT * FROM aTable WHERE (organization_id, entity_id) in (SELECT organization_id, entity_id FROM aTable WHERE a_byte != 8)) AS t";
+            statement = conn.prepareStatement(query);
+            rs = statement.executeQuery();
+            assertTrue (rs.next());
+            assertEquals(8,rs.getInt(1));
+
+            assertFalse(rs.next());
         } finally {
             conn.close();
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index 9a95ea4..a699d48 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -229,7 +229,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF (I2.item_id)",
+                "    DYNAMIC SERVER FILTER BY item_id IN (I2.item_id)",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -241,7 +241,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF (I2.supplier_id)",
+                "    DYNAMIC SERVER FILTER BY item_id IN (I2.supplier_id)",
                 /*
                  * testStarJoin()
                  *     SELECT order_id, c.name, i.name iname, quantity, o.date 
@@ -270,7 +270,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF (O.item_id)",
+                "    DYNAMIC SERVER FILTER BY item_id IN (O.item_id)",
                 /*
                  * testSubJoin()
                  *     SELECT * FROM joinCustomerTable c 
@@ -415,11 +415,11 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY supplier_id BETWEEN MIN/MAX OF (I.supplier_id)\n" +
+                "    DYNAMIC SERVER FILTER BY supplier_id IN (I.supplier_id)\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col1 = rhs.col2
                  */
@@ -430,7 +430,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col2
                  */
@@ -442,7 +442,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    DYNAMIC SERVER FILTER BY COL0 IN (RHS.COL2)",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col1 AND lhs.col1 = rhs.col2
                  */
@@ -454,7 +454,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    DYNAMIC SERVER FILTER BY (COL0, COL1) IN ((RHS.COL1, RHS.COL2))",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col1 AND lhs.col2 = rhs.col3 - 1 AND lhs.col1 = rhs.col2
                  */
@@ -592,7 +592,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF (I2.:item_id)",
+                "    DYNAMIC SERVER FILTER BY item_id IN (I2.:item_id)",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -782,11 +782,11 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_SCHEMA + ".idx_item\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY supplier_id BETWEEN MIN/MAX OF (I.0:supplier_id)\n" +
+                "    DYNAMIC SERVER FILTER BY supplier_id IN (I.0:supplier_id)\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col1 = rhs.col2
                  */
@@ -797,7 +797,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col2
                  */
@@ -809,7 +809,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    DYNAMIC SERVER FILTER BY COL0 IN (RHS.COL2)",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col1 AND lhs.col1 = rhs.col2
                  */
@@ -821,7 +821,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    DYNAMIC SERVER FILTER BY (COL0, COL1) IN ((RHS.COL1, RHS.COL2))",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col1 AND lhs.col2 = rhs.col3 - 1 AND lhs.col1 = rhs.col2
                  */
@@ -970,7 +970,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER "+ MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n"  +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF (I2.:item_id)",
+                "    DYNAMIC SERVER FILTER BY item_id IN (I2.:item_id)",
                 /*
                  * testSelfJoin()
                  *     SELECT i1.name, i2.name FROM joinItemTable i1 
@@ -984,7 +984,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF (I2.0:supplier_id)",
+                "    DYNAMIC SERVER FILTER BY item_id IN (I2.0:supplier_id)",
                 /*
                  * testStarJoin()
                  *     SELECT order_id, c.name, i.name iname, quantity, o.date 
@@ -1020,7 +1020,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_CUSTOMER_TABLE_DISPLAY_NAME+" [-32768]\n"+
                 "                    SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "                CLIENT MERGE SORT\n" +
-                "    DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF (O.item_id)",
+                "    DYNAMIC SERVER FILTER BY item_id IN (O.item_id)",
                 /*
                  * testSubJoin()
                  *     SELECT * FROM joinCustomerTable c 
@@ -1172,11 +1172,11 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 1(DELAYED EVALUATION)\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    DYNAMIC SERVER FILTER BY supplier_id BETWEEN MIN/MAX OF (I.0:supplier_id)\n" +
+                "    DYNAMIC SERVER FILTER BY supplier_id IN (I.0:supplier_id)\n" +
                 "    JOIN-SCANNER 4 ROW LIMIT",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col1 = rhs.col2
                  */
@@ -1187,7 +1187,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT MERGE SORT",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col2
                  */
@@ -1199,7 +1199,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    DYNAMIC SERVER FILTER BY COL0 IN (RHS.COL2)",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col1 AND lhs.col1 = rhs.col2
                  */
@@ -1211,7 +1211,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "    DYNAMIC SERVER FILTER BY (COL0, COL1) IN ((RHS.COL1, RHS.COL2))",
                 /*
                  * testJoinWithKeyRangeOptimization()
-                 *     SELECT (*SKIP_SCAN_HASH_JOIN*) lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
+                 *     SELECT lhs.col0, lhs.col1, lhs.col2, rhs.col0, rhs.col1, rhs.col2 
                  *     FROM TEMP_TABLE_COMPOSITE_PK lhs 
                  *     JOIN TEMP_TABLE_COMPOSITE_PK rhs ON lhs.col0 = rhs.col1 AND lhs.col2 = rhs.col3 - 1 AND lhs.col1 = rhs.col2
                  */
@@ -3549,7 +3549,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
             conn.commit();
             
             // No leading part of PK
-            String query = "SELECT /*+ SKIP_SCAN_HASH_JOIN*/ lhs.col0, lhs.col1, lhs.col2, lhs.col3, rhs.col0, rhs.col1, rhs.col2, rhs.col3 FROM " 
+            String query = "SELECT lhs.col0, lhs.col1, lhs.col2, lhs.col3, rhs.col0, rhs.col1, rhs.col2, rhs.col3 FROM " 
                     + tempTableWithCompositePK + " lhs JOIN "
                     + tempTableWithCompositePK + " rhs ON lhs.col1 = rhs.col2";
             PreparedStatement statement = conn.prepareStatement(query);
@@ -3579,7 +3579,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
             assertEquals(plans[21], QueryUtil.getExplainPlan(rs));
             
             // Two parts of PK but only one leading part
-            query = "SELECT /*+ SKIP_SCAN_HASH_JOIN*/ lhs.col0, lhs.col1, lhs.col2, lhs.col3, rhs.col0, rhs.col1, rhs.col2, rhs.col3 FROM " 
+            query = "SELECT lhs.col0, lhs.col1, lhs.col2, lhs.col3, rhs.col0, rhs.col1, rhs.col2, rhs.col3 FROM " 
                     + tempTableWithCompositePK + " lhs JOIN "
                     + tempTableWithCompositePK + " rhs ON lhs.col2 = rhs.col3 AND lhs.col0 = rhs.col2";
             statement = conn.prepareStatement(query);
@@ -3600,7 +3600,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
             assertEquals(plans[22], QueryUtil.getExplainPlan(rs));
             
             // Two leading parts of PK
-            query = "SELECT /*+ SKIP_SCAN_HASH_JOIN*/ lhs.col0, lhs.col1, lhs.col2, lhs.col3, rhs.col0, rhs.col1, rhs.col2, rhs.col3 FROM " 
+            query = "SELECT lhs.col0, lhs.col1, lhs.col2, lhs.col3, rhs.col0, rhs.col1, rhs.col2, rhs.col3 FROM " 
                     + tempTableWithCompositePK + " lhs JOIN "
                     + tempTableWithCompositePK + " rhs ON lhs.col1 = rhs.col2 AND lhs.col0 = rhs.col1";
             statement = conn.prepareStatement(query);
@@ -3630,7 +3630,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
             assertEquals(plans[23], QueryUtil.getExplainPlan(rs));
             
             // All parts of PK
-            query = "SELECT /*+ SKIP_SCAN_HASH_JOIN*/ lhs.col0, lhs.col1, lhs.col2, lhs.col3, rhs.col0, rhs.col1, rhs.col2, rhs.col3 FROM " 
+            query = "SELECT lhs.col0, lhs.col1, lhs.col2, lhs.col3, rhs.col0, rhs.col1, rhs.col2, rhs.col3 FROM " 
                     + tempTableWithCompositePK + " lhs JOIN "
                     + tempTableWithCompositePK + " rhs ON lhs.col1 = rhs.col2 AND lhs.col2 = rhs.col3 - 1 AND lhs.col0 = rhs.col1";
             statement = conn.prepareStatement(query);
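
The assertion changes above reflect the new plan shape: the child-parent join
optimization now emits an exact IN filter instead of a BETWEEN MIN/MAX range.
As a hedged sketch of how such a plan can be inspected outside the test harness
(the fixture table joinItemTable comes from the tests above; the connection URL
is an assumption):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ExplainSelfJoin {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(
                     "EXPLAIN SELECT i1.name, i2.name FROM joinItemTable i1 "
                         + "JOIN joinItemTable i2 ON i1.item_id = i2.item_id")) {
                while (rs.next()) {
                    // Each row of an EXPLAIN result is one line of the plan; the
                    // "DYNAMIC SERVER FILTER BY item_id IN (...)" step is what
                    // the assertions above now expect.
                    System.out.println(rs.getString(1));
                }
            }
        }
    }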

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
index 066790c..85e562c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
@@ -139,11 +139,11 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id, NAME\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
+                "            SKIP-SCAN-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF \\(\\$\\d+.\\$\\d+\\)\n" +
+                "            DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "    AFTER-JOIN SERVER FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
@@ -165,7 +165,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF \\(O.item_id\\)\n" +
+                "            DYNAMIC SERVER FILTER BY item_id IN \\(O.item_id\\)\n" +
                 "            AFTER-JOIN SERVER FILTER BY \\(I.NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)\n" +
                 "    DYNAMIC SERVER FILTER BY customer_id IN \\(\\$\\d+.\\$\\d+\\)"
                 }});
@@ -289,7 +289,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF \\(\\$\\d+.\\$\\d+\\)\n" +
+                "            DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "    AFTER-JOIN SERVER FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
@@ -314,7 +314,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF \\(O.item_id\\)\n" +
+                "            DYNAMIC SERVER FILTER BY item_id IN \\(O.item_id\\)\n" +
                 "            AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)\n" +
                 "    DYNAMIC SERVER FILTER BY customer_id IN \\(\\$\\d+.\\$\\d+\\)"
                 }});

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
index bbb84ba..82b1c68 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
@@ -143,11 +143,11 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id, NAME\\]\n" +
                 "    CLIENT MERGE SORT\n" +
                 "    CLIENT SORTED BY \\[item_id, NAME\\]\n" +
-                "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
+                "        SKIP-SCAN-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF \\(\\$\\d+.\\$\\d+\\)\n" +
+                "        DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "CLIENT FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",            
 
                 "SORT-MERGE-JOIN \\(SEMI\\) TABLES\n" +
@@ -163,7 +163,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF \\(O.item_id\\)\n" +
+                "        DYNAMIC SERVER FILTER BY item_id IN \\(O.item_id\\)\n" +
                 "        AFTER-JOIN SERVER FILTER BY \\(I.NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
         testCases.add(new String[][] {
@@ -285,7 +285,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF \\(\\$\\d+.\\$\\d+\\)\n" +
+                "        DYNAMIC SERVER FILTER BY item_id IN \\(\\$\\d+.\\$\\d+\\)\n" +
                 "CLIENT FILTER BY \\(\\$\\d+.\\$\\d+ IS NOT NULL OR \\$\\d+.\\$\\d+ IS NOT NULL\\)",
                 
                 "SORT-MERGE-JOIN \\(SEMI\\) TABLES\n" +
@@ -305,7 +305,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[item_id\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        DYNAMIC SERVER FILTER BY item_id BETWEEN MIN/MAX OF \\(O.item_id\\)\n" +
+                "        DYNAMIC SERVER FILTER BY item_id IN \\(O.item_id\\)\n" +
                 "        AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
         return testCases;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 0a4ea7a..bcf26be 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -362,8 +362,9 @@ non_select_node returns [BindableStatement ret]
     |	s=create_sequence_node
     |	s=drop_sequence_node
     |   s=update_statistics_node
-    |   s=explain_node) { contextStack.pop();  $ret = s; }
+    |   s=explain_node) { $ret = s; }
     ;
+finally{ contextStack.pop(); }
     
 explain_node returns [BindableStatement ret]
     :   EXPLAIN q=oneStatement {$ret=factory.explain(q);}
@@ -567,8 +568,9 @@ select_node returns [SelectStatement ret]
         (HAVING having=expression)?
         (ORDER BY order=order_by)?
         (LIMIT l=limit)?
-        { ParseContext context = contextStack.pop(); $ret = factory.select(from, null, d!=null, sel, where, group, having, order, l, getBindCount(), context.isAggregate(), context.hasSequences()); }
+        { ParseContext context = contextStack.peek(); $ret = factory.select(from, null, d!=null, sel, where, group, having, order, l, getBindCount(), context.isAggregate(), context.hasSequences()); }
     ;
+finally{ contextStack.pop(); }
 
 // Parse a full select expression structure.
 hinted_select_node returns [SelectStatement ret]
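
The grammar change above moves contextStack.pop() out of the rule action and
into an ANTLR finally block, so the context stack is unwound even when
recognition fails partway through a rule; select_node likewise switches from
pop() to peek() mid-rule. A minimal Java sketch of the same balancing pattern
(class and names are illustrative, not Phoenix's ParseContext API):

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class BalancedContextStack {
        private final Deque<String> contextStack = new ArrayDeque<>();

        String parseSelect(boolean failMidRule) {
            contextStack.push("select-context"); // pushed on rule entry
            try {
                if (failMidRule) {
                    throw new RuntimeException("recognition error");
                }
                // Mid-rule code peeks instead of popping, mirroring the
                // switch from contextStack.pop() to contextStack.peek().
                return contextStack.peek();
            } finally {
                contextStack.pop(); // always unwound, even on an error
            }
        }

        public static void main(String[] args) {
            BalancedContextStack parser = new BalancedContextStack();
            System.out.println(parser.parseSelect(false)); // select-context
            try { parser.parseSelect(true); } catch (RuntimeException ignored) { }
            System.out.println(parser.contextStack.size()); // 0: still balanced
        }
    }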

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index 014e73a..9642489 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -101,6 +101,7 @@ public class QueryCompiler {
     private final ParallelIteratorFactory parallelIteratorFactory;
     private final SequenceManager sequenceManager;
     private final boolean useSortMergeJoin;
+    private final boolean noChildParentJoinOptimization;
 
     public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver) throws SQLException {
         this(statement, select, resolver, Collections.<PDatum>emptyList(), null, new SequenceManager(statement));
@@ -115,6 +116,7 @@ public class QueryCompiler {
         this.parallelIteratorFactory = parallelIteratorFactory;
         this.sequenceManager = sequenceManager;
         this.useSortMergeJoin = select.getHint().hasHint(Hint.USE_SORT_MERGE_JOIN);
+        this.noChildParentJoinOptimization = select.getHint().hasHint(Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION);
         if (statement.getConnection().getQueryServices().getLowestClusterHBaseVersion() >= PhoenixDatabaseMetaData.ESSENTIAL_FAMILY_VERSION_THRESHOLD) {
             this.scan.setAttribute(LOAD_COLUMN_FAMILIES_ON_DEMAND_ATTR, QueryConstants.TRUE);
         }
@@ -259,16 +261,14 @@ public class QueryCompiler {
                 joinExpressions[i] = joinConditions.getFirst();
                 List<Expression> hashExpressions = joinConditions.getSecond();
                 Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
-                boolean complete = getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), tableRef, joinSpec.getType(), joinExpressions[i], hashExpressions);
+                boolean optimized = getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), tableRef, joinSpec.getType(), joinExpressions[i], hashExpressions);
                 Expression keyRangeLhsExpression = keyRangeExpressions.getFirst();
                 Expression keyRangeRhsExpression = keyRangeExpressions.getSecond();
-                boolean hasFilters = joinSpec.getJoinTable().hasFilters();
-                boolean optimized = complete && hasFilters;
                 joinTypes[i] = joinSpec.getType();
                 if (i < count - 1) {
                     fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size()));
                 }
-                subPlans[i] = new HashSubPlan(i, joinPlan, optimized ? null : hashExpressions, joinSpec.isSingleValueOnly(), keyRangeLhsExpression, keyRangeRhsExpression, hasFilters);
+                subPlans[i] = new HashSubPlan(i, joinPlan, optimized ? null : hashExpressions, joinSpec.isSingleValueOnly(), keyRangeLhsExpression, keyRangeRhsExpression);
             }
             if (needsProject) {
                 TupleProjector.serializeProjectorIntoScan(context.getScan(), initialProjectedTable.createTupleProjector());
@@ -339,7 +339,7 @@ public class QueryCompiler {
             HashJoinInfo joinInfo = new HashJoinInfo(projectedTable.getTable(), joinIds, new List[] {joinExpressions}, new JoinType[] {type == JoinType.Right ? JoinType.Left : type}, new boolean[] {true}, new PTable[] {lhsTable}, new int[] {fieldPosition}, postJoinFilterExpression, limit, forceProjection);
             Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
             getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), rhsTableRef, type, joinExpressions, hashExpressions);
-            return HashJoinPlan.create(joinTable.getStatement(), rhsPlan, joinInfo, new HashSubPlan[] {new HashSubPlan(0, lhsPlan, hashExpressions, false, keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond(), lhsJoin.hasFilters())});
+            return HashJoinPlan.create(joinTable.getStatement(), rhsPlan, joinInfo, new HashSubPlan[] {new HashSubPlan(0, lhsPlan, hashExpressions, false, keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond())});
         }
 
         JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
@@ -395,7 +395,7 @@ public class QueryCompiler {
     }
 
     private boolean getKeyExpressionCombinations(Pair<Expression, Expression> combination, StatementContext context, SelectStatement select, TableRef table, JoinType type, final List<Expression> joinExpressions, final List<Expression> hashExpressions) throws SQLException {
-        if (type != JoinType.Inner && type != JoinType.Semi)
+        if ((type != JoinType.Inner && type != JoinType.Semi) || this.noChildParentJoinOptimization)
             return false;
 
         Scan scanCopy = ScanUtil.newScan(context.getScan());
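
With the flag wired into getKeyExpressionCombinations(), the optimization can
now be suppressed per query via the hint introduced in HintNode.java below. A
hedged usage sketch (fixture table names from the tests above; the connection
URL is an assumption):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;

    public class NoChildParentJoinHintExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 PreparedStatement stmt = conn.prepareStatement(
                     // The hint makes getKeyExpressionCombinations() return false,
                     // so no dynamic key-range filter is pushed to the server scan.
                     "SELECT /*+ NO_CHILD_PARENT_JOIN_OPTIMIZATION */ i1.name, i2.name "
                         + "FROM joinItemTable i1 JOIN joinItemTable i2 "
                         + "ON i1.item_id = i2.item_id");
                 ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    System.out.println(rs.getString(1) + ", " + rs.getString(2));
                }
            }
        }
    }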

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index db2a29d..aea075d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -31,7 +31,6 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
 import org.apache.phoenix.compile.ColumnProjector;
@@ -44,8 +43,6 @@ import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.compile.WhereCompiler;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
-import org.apache.phoenix.expression.AndExpression;
-import org.apache.phoenix.expression.ComparisonExpression;
 import org.apache.phoenix.expression.Determinism;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.InListExpression;
@@ -58,7 +55,6 @@ import org.apache.phoenix.job.JobManager.JobCallable;
 import org.apache.phoenix.join.HashCacheClient;
 import org.apache.phoenix.join.HashJoinInfo;
 import org.apache.phoenix.parse.FilterableStatement;
-import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.parse.SelectStatement;
@@ -82,8 +78,6 @@ public class HashJoinPlan extends DelegateQueryPlan {
     private final HashJoinInfo joinInfo;
     private final SubPlan[] subPlans;
     private final boolean recompileWhereClause;
-    private final boolean forceHashJoinRangeScan;
-    private final boolean forceHashJoinSkipScan;
     private List<SQLCloseable> dependencies;
     private HashCacheClient hashClient;
     private int maxServerCacheTimeToLive;
@@ -115,8 +109,6 @@ public class HashJoinPlan extends DelegateQueryPlan {
         this.joinInfo = joinInfo;
         this.subPlans = subPlans;
         this.recompileWhereClause = recompileWhereClause;
-        this.forceHashJoinRangeScan = plan.getStatement().getHint().hasHint(Hint.RANGE_SCAN_HASH_JOIN);
-        this.forceHashJoinSkipScan = plan.getStatement().getHint().hasHint(Hint.SKIP_SCAN_HASH_JOIN);
     }
 
     @Override
@@ -200,39 +192,14 @@ public class HashJoinPlan extends DelegateQueryPlan {
     }
 
     private Expression createKeyRangeExpression(Expression lhsExpression,
-            Expression rhsExpression, List<ImmutableBytesWritable> rhsValues, 
-            ImmutableBytesWritable ptr, boolean hasFilters) throws SQLException {
+            Expression rhsExpression, List<Expression> rhsValues, 
+            ImmutableBytesWritable ptr) throws SQLException {
         if (rhsValues.isEmpty())
-            return LiteralExpression.newConstant(false, PBoolean.INSTANCE, Determinism.ALWAYS);
+            return LiteralExpression.newConstant(false, PBoolean.INSTANCE, Determinism.ALWAYS);        
         
-        PDataType type = rhsExpression.getDataType();
-        if (!useInClause(hasFilters)) {
-            ImmutableBytesWritable minValue = rhsValues.get(0);
-            ImmutableBytesWritable maxValue = rhsValues.get(0);
-            for (ImmutableBytesWritable value : rhsValues) {
-                if (value.compareTo(minValue) < 0) {
-                    minValue = value;
-                }
-                if (value.compareTo(maxValue) > 0) {
-                    maxValue = value;
-                }
-            }
-            
-            return AndExpression.create(Lists.newArrayList(
-                    ComparisonExpression.create(CompareOp.GREATER_OR_EQUAL, Lists.newArrayList(lhsExpression, LiteralExpression.newConstant(type.toObject(minValue), type)), ptr), 
-                    ComparisonExpression.create(CompareOp.LESS_OR_EQUAL, Lists.newArrayList(lhsExpression, LiteralExpression.newConstant(type.toObject(maxValue), type)), ptr)));
-        }
+        rhsValues.add(0, lhsExpression);
         
-        List<Expression> children = Lists.newArrayList(lhsExpression);
-        for (ImmutableBytesWritable value : rhsValues) {
-            children.add(LiteralExpression.newConstant(type.toObject(value), type));
-        }
-        
-        return InListExpression.create(children, false, ptr, false);
-    }
-    
-    private boolean useInClause(boolean hasFilters) {
-        return this.forceHashJoinSkipScan || (!this.forceHashJoinRangeScan && hasFilters);
+        return InListExpression.create(rhsValues, false, ptr);
     }
 
     @Override
@@ -345,29 +312,26 @@ public class HashJoinPlan extends DelegateQueryPlan {
         private final boolean singleValueOnly;
         private final Expression keyRangeLhsExpression;
         private final Expression keyRangeRhsExpression;
-        private final boolean hasFilters;
         
         public HashSubPlan(int index, QueryPlan subPlan, 
                 List<Expression> hashExpressions,
                 boolean singleValueOnly,
                 Expression keyRangeLhsExpression, 
-                Expression keyRangeRhsExpression, 
-                boolean hasFilters) {
+                Expression keyRangeRhsExpression) {
             this.index = index;
             this.plan = subPlan;
             this.hashExpressions = hashExpressions;
             this.singleValueOnly = singleValueOnly;
             this.keyRangeLhsExpression = keyRangeLhsExpression;
             this.keyRangeRhsExpression = keyRangeRhsExpression;
-            this.hasFilters = hasFilters;
         }
 
         @Override
         public Object execute(HashJoinPlan parent) throws SQLException {
             ScanRanges ranges = parent.delegate.getContext().getScanRanges();
-            List<ImmutableBytesWritable> keyRangeRhsValues = null;
+            List<Expression> keyRangeRhsValues = null;
             if (keyRangeRhsExpression != null) {
-                keyRangeRhsValues = Lists.<ImmutableBytesWritable>newArrayList();
+                keyRangeRhsValues = Lists.<Expression>newArrayList();
             }
             ServerCache cache = null;
             if (hashExpressions != null) {
@@ -383,15 +347,11 @@ public class HashJoinPlan extends DelegateQueryPlan {
                 ResultIterator iterator = plan.iterator();
                 for (Tuple result = iterator.next(); result != null; result = iterator.next()) {
                     // Evaluate key expressions for hash join key range optimization.
-                    ImmutableBytesWritable value = new ImmutableBytesWritable();
-                    keyRangeRhsExpression.reset();
-                    if (keyRangeRhsExpression.evaluate(result, value)) {
-                        keyRangeRhsValues.add(value);
-                    }
+                    keyRangeRhsValues.add(HashCacheClient.evaluateKeyExpression(keyRangeRhsExpression, result, plan.getContext().getTempPtr()));
                 }
             }
             if (keyRangeRhsValues != null) {
-                parent.keyRangeExpressions.add(parent.createKeyRangeExpression(keyRangeLhsExpression, keyRangeRhsExpression, keyRangeRhsValues, plan.getContext().getTempPtr(), hasFilters));
+                parent.keyRangeExpressions.add(parent.createKeyRangeExpression(keyRangeLhsExpression, keyRangeRhsExpression, keyRangeRhsValues, plan.getContext().getTempPtr()));
             }
             return cache;
         }
@@ -430,8 +390,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
                 return Collections.<String> emptyList();
             
             String step = "    DYNAMIC SERVER FILTER BY " + keyRangeLhsExpression.toString() 
-                    + (parent.useInClause(hasFilters) ? " IN " : " BETWEEN MIN/MAX OF ") 
-                    + "(" + keyRangeRhsExpression.toString() + ")";
+                    + " IN (" + keyRangeRhsExpression.toString() + ")";
             return Collections.<String> singletonList(step);
         }
         

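The rewritten createKeyRangeExpression() always builds an IN filter, prepending
the LHS key expression to the collected RHS values. The practical effect is on
how much of the key space the server scan touches; a self-contained toy
comparison (plain Java collections, not Phoenix internals):

    import java.util.List;
    import java.util.TreeSet;

    public class RangeVsInFilter {
        public static void main(String[] args) {
            TreeSet<Integer> tableKeys = new TreeSet<>();
            for (int k = 0; k < 100; k++) {
                tableKeys.add(k);
            }
            // Join keys collected from the RHS while building the hash cache.
            List<Integer> rhsKeys = List.of(3, 50, 97);

            // Old plan shape: BETWEEN MIN/MAX scans every key in [3, 97].
            int rangeScanned = tableKeys.subSet(3, true, 97, true).size();

            // New plan shape: IN probes only the exact keys (a skip scan).
            long inScanned = rhsKeys.stream().filter(tableKeys::contains).count();

            System.out.println("BETWEEN MIN/MAX touches " + rangeScanned + " keys"); // 95
            System.out.println("IN touches " + inScanned + " keys");                 // 3
        }
    }
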
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
index 772db97..63178db 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
@@ -58,19 +58,12 @@ public class InListExpression extends BaseSingleExpression {
     private List<Expression> keyExpressions; // client side only
 
     public static Expression create (List<Expression> children, boolean isNegate, ImmutableBytesWritable ptr) throws SQLException {
-        return create(children, isNegate, ptr, true);
-    }
-    
-    public static Expression create (List<Expression> children, boolean isNegate, ImmutableBytesWritable ptr, boolean allowShortcut) throws SQLException {
         Expression firstChild = children.get(0);
         
         if (firstChild.isStateless() && (!firstChild.evaluate(null, ptr) || ptr.getLength() == 0)) {
             return LiteralExpression.newConstant(null, PBoolean.INSTANCE, firstChild.getDeterminism());
         }
-        // We set allowShortcut to false for child/parent join optimization since we 
-        // compare RVC expressions with literal expressions and we want to avoid 
-        // RVC-rewrite operation in ComparisonExpression.create().
-        if (allowShortcut && children.size() == 2) {
+        if (children.size() == 2) {
             return ComparisonExpression.create(isNegate ? CompareOp.NOT_EQUAL : CompareOp.EQUAL, children, ptr);
         }
         

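With the allowShortcut parameter gone, a two-child IN (the LHS plus a single
value) unconditionally collapses to a plain comparison again. A toy sketch of
the rule (string planning only, not Phoenix's expression tree):

    import java.util.List;

    public class InListShortcut {
        static String plan(String lhs, List<String> rhsValues) {
            if (rhsValues.size() == 1) {
                // Mirrors the children.size() == 2 shortcut (LHS + one value):
                // a one-value IN list is compiled as a plain equality.
                return lhs + " = " + rhsValues.get(0);
            }
            return lhs + " IN (" + String.join(", ", rhsValues) + ")";
        }

        public static void main(String[] args) {
            System.out.println(plan("COL0", List.of("'a'")));        // COL0 = 'a'
            System.out.println(plan("COL0", List.of("'a'", "'b'"))); // COL0 IN ('a', 'b')
        }
    }
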
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java
index 6494603..f13b28e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheClient.java
@@ -30,18 +30,22 @@ import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.ExpressionType;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.RowValueConstructorExpression;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 import org.apache.phoenix.util.TupleUtil;
-
 import org.iq80.snappy.Snappy;
 
+import com.google.common.collect.Lists;
+
 /**
  * 
  * Client for adding cache of one side of a join to region servers
@@ -70,7 +74,7 @@ public class HashCacheClient  {
      * @throws MaxServerCacheSizeExceededException if size of hash cache exceeds max allowed
      * size
      */
-    public ServerCache addHashCache(ScanRanges keyRanges, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions, boolean singleValueOnly, TableRef cacheUsingTableRef, Expression keyRangeRhsExpression, List<ImmutableBytesWritable> keyRangeRhsValues) throws SQLException {
+    public ServerCache addHashCache(ScanRanges keyRanges, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions, boolean singleValueOnly, TableRef cacheUsingTableRef, Expression keyRangeRhsExpression, List<Expression> keyRangeRhsValues) throws SQLException {
         /**
          * Serialize and compress hashCacheTable
          */
@@ -79,7 +83,7 @@ public class HashCacheClient  {
         return serverCache.addServerCache(keyRanges, ptr, new HashCacheFactory(), cacheUsingTableRef);
     }
     
-    private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions, boolean singleValueOnly, Expression keyRangeRhsExpression, List<ImmutableBytesWritable> keyRangeRhsValues) throws SQLException {
+    private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions, boolean singleValueOnly, Expression keyRangeRhsExpression, List<Expression> keyRangeRhsValues) throws SQLException {
         long maxSize = serverCache.getConnection().getQueryServices().getProps().getLong(QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE);
         estimatedSize = Math.min(estimatedSize, maxSize);
         if (estimatedSize > Integer.MAX_VALUE) {
@@ -98,6 +102,7 @@ public class HashCacheClient  {
             out.writeInt(exprSize * (singleValueOnly ? -1 : 1));
             int nRows = 0;
             out.writeInt(nRows); // In the end will be replaced with total number of rows            
+            ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
             for (Tuple result = iterator.next(); result != null; result = iterator.next()) {
                 TupleUtil.write(result, out);
                 if (baOut.size() > maxSize) {
@@ -105,11 +110,7 @@ public class HashCacheClient  {
                 }
                 // Evaluate key expressions for hash join key range optimization.
                 if (keyRangeRhsExpression != null) {
-                    ImmutableBytesWritable value = new ImmutableBytesWritable();
-                    keyRangeRhsExpression.reset();
-                    if (keyRangeRhsExpression.evaluate(result, value)) {
-                        keyRangeRhsValues.add(value);
-                    }
+                    keyRangeRhsValues.add(evaluateKeyExpression(keyRangeRhsExpression, result, tempPtr));
                 }
                 nRows++;
             }
@@ -136,4 +137,45 @@ public class HashCacheClient  {
             iterator.close();
         }
     }
+    
+    /**
+     * Evaluate the RHS key expression and wrap the result as a new Expression.
+     * Unlike other types of Expression which will be evaluated and wrapped as a 
+     * single LiteralExpression, RowValueConstructorExpression should be handled 
+     * differently. We should evaluate each child of RVC and wrap them into a new
+     * RVC Expression, in order to make sure that the later coercion between the 
+     * LHS key expression and this RHS key expression will be successful.
+     * 
+     * @param keyExpression the RHS key expression
+     * @param tuple the input tuple
+     * @param ptr the temporary pointer
+     * @return the Expression containing the evaluated result
+     * @throws SQLException 
+     */
+    public static Expression evaluateKeyExpression(Expression keyExpression, Tuple tuple, ImmutableBytesWritable ptr) throws SQLException {
+        if (!(keyExpression instanceof RowValueConstructorExpression)) {
+            PDataType type = keyExpression.getDataType();
+            keyExpression.reset();
+            if (keyExpression.evaluate(tuple, ptr)) {
+                return LiteralExpression.newConstant(type.toObject(ptr), type);
+            }
+            
+            return LiteralExpression.newConstant(null, type);
+        }
+        
+        List<Expression> children = keyExpression.getChildren();
+        List<Expression> values = Lists.newArrayListWithExpectedSize(children.size());
+        for (Expression child : children) {
+            PDataType type = child.getDataType();
+            child.reset();
+            if (child.evaluate(tuple, ptr)) {
+                values.add(LiteralExpression.newConstant(type.toObject(ptr), type));
+            } else {
+                values.add(LiteralExpression.newConstant(null, type));
+            }
+        }
+        // The early evaluation of this constant expression is deliberately
+        // skipped, since it may still be coerced later.
+        return new RowValueConstructorExpression(values, false);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 7a2d313..9c5c2cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -291,7 +291,7 @@ public class QueryOptimizer {
                         if (extractedCondition != null) {
                             outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition));
                         }
-                        HintNode hint = HintNode.combine(HintNode.subtract(indexSelect.getHint(), new Hint[] {Hint.INDEX, Hint.RANGE_SCAN_HASH_JOIN}), FACTORY.hint("NO_INDEX SKIP_SCAN_HASH_JOIN"));
+                        HintNode hint = HintNode.combine(HintNode.subtract(indexSelect.getHint(), new Hint[] {Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION}), FACTORY.hint("NO_INDEX"));
                         SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere);
                         ColumnResolver queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
                         query = SubqueryRewriter.transform(query, queryResolver, statement.getConnection());
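
The hint rewrite above subtracts the hints that no longer apply to the fallback
data-table query and folds in NO_INDEX. A toy sketch of that subtract-then-combine
step using EnumSet (HintNode's own API is not reproduced here):

    import java.util.EnumSet;

    public class HintRewrite {
        enum Hint { INDEX, NO_INDEX, NO_CHILD_PARENT_JOIN_OPTIMIZATION, SKIP_SCAN }

        public static void main(String[] args) {
            EnumSet<Hint> hints = EnumSet.of(Hint.INDEX, Hint.SKIP_SCAN);
            // Subtract the hints that no longer apply to the rewritten query...
            hints.removeAll(EnumSet.of(Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION));
            // ...then combine with the hint the fallback plan needs.
            hints.add(Hint.NO_INDEX);
            System.out.println(hints); // [NO_INDEX, SKIP_SCAN]
        }
    }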

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c58f442/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
index 5ee8016..94f9bfb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
@@ -47,13 +47,9 @@ public class HintNode {
          */
         SKIP_SCAN,
         /**
-         * Forces a range scan when full or partial primary key is used as join keys.
+         * Prevents use of the child-parent join optimization.
          */
-        RANGE_SCAN_HASH_JOIN,
-        /**
-         * Forces a skip scan when full or partial primary key is used as join keys.
-         */
-        SKIP_SCAN_HASH_JOIN,
+        NO_CHILD_PARENT_JOIN_OPTIMIZATION,
         /**
         * Prevents the usage of indexes, forcing usage
         * of the data table for a query.


[33/50] [abbrv] phoenix git commit: PHOENIX-1667 Update sqlline to 1.1.8

Posted by ma...@apache.org.
PHOENIX-1667 Update sqlline to 1.1.8


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b8c0559c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b8c0559c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b8c0559c

Branch: refs/heads/calcite
Commit: b8c0559c0d1f7f5907ffab6dba43991929692b59
Parents: e09a8f8
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Feb 16 18:08:57 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Feb 16 18:08:57 2015 -0800

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b8c0559c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 47363da..74a8a36 100644
--- a/pom.xml
+++ b/pom.xml
@@ -91,7 +91,7 @@
     <commons-lang.version>2.5</commons-lang.version>
     <commons-logging.version>1.1.1</commons-logging.version>
     <commons-csv.version>1.0</commons-csv.version>
-    <sqlline.version>1.1.6</sqlline.version>
+    <sqlline.version>1.1.8</sqlline.version>
     <guava.version>12.0.1</guava.version>
     <jackson.version>1.8.8</jackson.version>
     <flume.version>1.4.0</flume.version>


[25/50] [abbrv] phoenix git commit: PHOENIX-1646 Views and functional index expressions may lose information when stringified

Posted by ma...@apache.org.
PHOENIX-1646 Views and functional index expressions may lose information when stringified


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2730e874
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2730e874
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2730e874

Branch: refs/heads/calcite
Commit: 2730e874d4632e681d2e21d1df88e4516e87e72e
Parents: abeaa74
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Feb 10 01:05:06 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Feb 10 01:05:06 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/end2end/DerivedTableIT.java  |  6 ++--
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 30 ++++++++++----------
 .../phoenix/end2end/HashJoinLocalIndexIT.java   |  2 +-
 .../org/apache/phoenix/end2end/SubqueryIT.java  | 16 +++++------
 .../end2end/SubqueryUsingSortMergeJoinIT.java   | 24 ++++++++--------
 .../java/org/apache/phoenix/end2end/ViewIT.java |  2 +-
 .../end2end/index/BaseMutableIndexIT.java       |  6 ++--
 .../index/GlobalIndexOptimizationIT.java        |  4 +--
 .../phoenix/end2end/index/LocalIndexIT.java     |  4 +--
 .../phoenix/compile/IndexStatementRewriter.java |  5 ++--
 10 files changed, 49 insertions(+), 50 deletions(-)
----------------------------------------------------------------------
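
The fix quotes identifiers when expressions are stringified (visible as
"A_STRING" rather than A_STRING in the plans below), since an unquoted
identifier is normalized on re-parse and its original spelling is lost. A toy
illustration of the round-trip problem (simplified normalization rule, not
Phoenix's parser):

    public class QuotedIdentifierRoundTrip {
        // Simplified normalization rule: quoted identifiers keep their exact
        // spelling, unquoted identifiers are normalized to upper case.
        static String normalize(String identifier) {
            if (identifier.startsWith("\"") && identifier.endsWith("\"")) {
                return identifier.substring(1, identifier.length() - 1);
            }
            return identifier.toUpperCase();
        }

        public static void main(String[] args) {
            // Stringified without quotes, the original spelling cannot be
            // recovered when the expression is re-parsed...
            System.out.println(normalize("a_string"));     // A_STRING
            // ...while the quoted form round-trips exactly.
            System.out.println(normalize("\"a_string\"")); // a_string
        }
    }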


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
index 7a418bd..7443267 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
@@ -92,15 +92,15 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
                 "CREATE INDEX ATABLE_DERIVED_IDX ON aTable (a_byte) INCLUDE (A_STRING, B_STRING)" 
                 }, {
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE_DERIVED_IDX\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"A_STRING\", \"B_STRING\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [B_STRING]\n" +
+                "CLIENT SORTED BY [\"B_STRING\"]\n" +
                 "CLIENT SORTED BY [A]\n" +
                 "CLIENT AGGREGATE INTO DISTINCT ROWS BY [A]\n" +
                 "CLIENT SORTED BY [A DESC]",
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE_DERIVED_IDX\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"A_STRING\", \"B_STRING\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "CLIENT AGGREGATE INTO DISTINCT ROWS BY [A]\n" +
                 "CLIENT DISTINCT ON [COLLECTDISTINCT(B)]"}});

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index 76eab22..5d2f522 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -478,9 +478,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     GROUP BY i.name ORDER BY i.name
                  */     
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.0:NAME]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [I.0:NAME]\n" +
+                "CLIENT SORTED BY [\"I.0:NAME\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY",
@@ -518,7 +518,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.0:NAME]\n" +
+                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
@@ -601,7 +601,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER SORTED BY [I1.0:NAME, I2.0:NAME]\n" +
+                "    SERVER SORTED BY [\"I1.0:NAME\", \"I2.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item",
@@ -651,14 +651,14 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     ORDER BY c.customer_id, i.name
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + " [*] - ['0000000005']\n" +
-                "    SERVER SORTED BY [\"C.customer_id\", I.0:NAME]\n" +
+                "    SERVER SORTED BY [\"C.customer_id\", \"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
-                "                    SERVER FILTER BY NAME != 'T3'\n" +
+                "                    SERVER FILTER BY \"NAME\" != 'T3'\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
                 "    DYNAMIC SERVER FILTER BY \"customer_id\" IN (\"O.customer_id\")",
@@ -750,7 +750,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
-                "                    SERVER FILTER BY NAME != 'T3'\n" +
+                "                    SERVER FILTER BY \"NAME\" != 'T3'\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME,
                 /*
@@ -845,9 +845,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     GROUP BY i.name ORDER BY i.name
                  */     
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.0:NAME]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [I.0:NAME]\n" +
+                "CLIENT SORTED BY [\"I.0:NAME\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
@@ -887,9 +887,9 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_ITEM_TABLE_DISPLAY_NAME+" [-32768]\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.0:NAME]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [I.0:NAME]\n" +
+                "CLIENT SORTED BY [\"I.0:NAME\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /*
@@ -979,7 +979,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n"  +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER SORTED BY [I1.0:NAME, I2.0:NAME]\n" +
+                "    SERVER SORTED BY [\"I1.0:NAME\", \"I2.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n" +
@@ -1035,14 +1035,14 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     ORDER BY c.customer_id, i.name
                  */
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + " [*] - ['0000000005']\n" +
-                "    SERVER SORTED BY [\"C.customer_id\", I.0:NAME]\n"+
+                "    SERVER SORTED BY [\"C.customer_id\", \"I.0:NAME\"]\n"+
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n" +
-                "                    SERVER FILTER BY NAME != 'T3'\n" +
+                "                    SERVER FILTER BY \"NAME\" != 'T3'\n" +
                 "                CLIENT MERGE SORT\n" +
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
@@ -1137,7 +1137,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY RANGE SCAN OVER " +  MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
-                "                    SERVER FILTER BY NAME != 'T3'\n" +
+                "                    SERVER FILTER BY \"NAME\" != 'T3'\n" +
                 "                CLIENT MERGE SORT\n" +      
                 "                    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "                        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
index 45e80c6..2d0cc72 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinLocalIndexIT.java
@@ -109,7 +109,7 @@ public class HashJoinLocalIndexIT extends BaseHBaseManagedTimeIT {
                 
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768,'S1']\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [S.0:PHONE]\n" +
+                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"S.0:PHONE\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768,*] - [-32768,'T6']\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
index 2d11c5c..7bc97e7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
@@ -186,7 +186,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_supplier\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 
-                "    SERVER SORTED BY [I.0:NAME]\n" +
+                "    SERVER SORTED BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
@@ -200,7 +200,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -209,7 +209,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -237,7 +237,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
-                "            AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)"
+                "            AFTER-JOIN SERVER FILTER BY \\(\"I.0:NAME\" = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)"
                 }});
         testCases.add(new String[][] {
                 {
@@ -259,7 +259,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                             
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 
-                "    SERVER SORTED BY [I.0:NAME]\n" +
+                "    SERVER SORTED BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
@@ -274,7 +274,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -283,7 +283,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -315,7 +315,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "                CLIENT MERGE SORT\n" +
                 "            DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
-                "            AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)\n" +
+                "            AFTER-JOIN SERVER FILTER BY \\(\"I.0:NAME\" = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)\n" +
                 "    DYNAMIC SERVER FILTER BY \"customer_id\" IN \\(\\$\\d+.\\$\\d+\\)"
                 }});
         return testCases;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
index 7457e02..f931bae 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
@@ -188,7 +188,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
                 "    CLIENT SORTED BY [\"item_id\"]\n" +
-                "CLIENT SORTED BY [I.0:NAME]",
+                "CLIENT SORTED BY [\"I.0:NAME\"]",
 
                 "SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
                 "    SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
@@ -197,9 +197,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
+                "        CLIENT SORTED BY \\[\"item_id\", \"NAME\"\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -208,9 +208,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
+                "    CLIENT SORTED BY \\[\"item_id\", \"NAME\"\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -234,7 +234,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
-                "        AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
+                "        AFTER-JOIN SERVER FILTER BY \\(\"I.0:NAME\" = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
         testCases.add(new String[][] {
                 {
@@ -258,7 +258,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
                 "    CLIENT SORTED BY [\"item_id\"]\n" +
-                "CLIENT SORTED BY [I.0:NAME]",
+                "CLIENT SORTED BY [\"I.0:NAME\"]",
 
                 "SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
                 "    SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
@@ -267,9 +267,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
+                "        CLIENT SORTED BY \\[\"item_id\", \"NAME\"\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -278,9 +278,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[NAME, \"item_id\"\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\"NAME\", \"item_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\"item_id\", NAME\\]\n" +
+                "    CLIENT SORTED BY \\[\"item_id\", \"NAME\"\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -306,7 +306,7 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
                 "            CLIENT MERGE SORT\n" +
                 "        DYNAMIC SERVER FILTER BY \"item_id\" IN \\(\"O.item_id\"\\)\n" +
-                "        AFTER-JOIN SERVER FILTER BY \\(I.0:NAME = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
+                "        AFTER-JOIN SERVER FILTER BY \\(\"I.0:NAME\" = 'T2' OR O.QUANTITY > \\$\\d+.\\$\\d+\\)",
                 }});
         return testCases;
     }    

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index db1e58f..003db4c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -451,6 +451,6 @@ public class ViewIT extends BaseViewIT {
         String queryPlan = QueryUtil.getExplainPlan(rs);
         assertEquals(
                 "CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER I1 [1,100] - [2,109]\n" + 
-                "    SERVER FILTER BY (S2 = 'bas' AND \"S1\" = 'foo')", queryPlan);
+                "    SERVER FILTER BY (\"S2\" = 'bas' AND \"S1\" = 'foo')", queryPlan);
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseMutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseMutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseMutableIndexIT.java
index db1f6fb..b2f8630 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseMutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseMutableIndexIT.java
@@ -314,7 +314,7 @@ public abstract class BaseMutableIndexIT extends BaseHBaseManagedTimeIT {
                 query = "SELECT b.* from " + TestUtil.DEFAULT_DATA_TABLE_FULL_NAME + " where int_col1 = 4";
                 rs = conn.createStatement().executeQuery("EXPLAIN " + query);
                 assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_" + TestUtil.DEFAULT_DATA_TABLE_FULL_NAME +" [-32768]\n" +
-                		"    SERVER FILTER BY TO_INTEGER(INT_COL1) = 4\nCLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
+                		"    SERVER FILTER BY TO_INTEGER(\"INT_COL1\") = 4\nCLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
                 rs = conn.createStatement().executeQuery(query);
                 assertTrue(rs.next());
                 assertEquals("varchar_b", rs.getString(1));
@@ -391,11 +391,11 @@ public abstract class BaseMutableIndexIT extends BaseHBaseManagedTimeIT {
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         if(localIndex){
             assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_" +TestUtil.DEFAULT_DATA_TABLE_FULL_NAME + " [-32768,~'1']\n" + 
-                    "    SERVER SORTED BY [V1]\n" + 
+                    "    SERVER SORTED BY [\"V1\"]\n" + 
                     "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
         } else {
             assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " +TestUtil.DEFAULT_INDEX_TABLE_FULL_NAME + " [~'1']\n" + 
-                    "    SERVER SORTED BY [V1]\n" + 
+                    "    SERVER SORTED BY [\"V1\"]\n" + 
                     "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
         }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
index e54e6a2..932c68b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
@@ -322,8 +322,8 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SERVER FILTER BY V1 = 'a'\n" +
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER _IDX_T \\[-32768,1\\] - \\[-32768,2\\]\n" +
-                    "            SERVER FILTER BY FIRST KEY ONLY AND K2 IN \\(3,4\\)\n" +
-                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "            SERVER FILTER BY FIRST KEY ONLY AND \"K2\" IN (3,4)\n" +
+                    "    DYNAMIC SERVER FILTER BY (\"T_ID\", \"K1\", \"K2\") IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             assertTrue("Expected:\n" + expected + "\ndid not match\n" + actual, Pattern.matches(expected,actual));
             
             rs = conn1.createStatement().executeQuery(query);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 6ff0475..9e66bbf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -471,7 +471,7 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL " + numRegions + "-WAY RANGE SCAN OVER "
                         + MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME)+" [-32768,*] - [-32768,'z']\n"
                         + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                        + "    SERVER AGGREGATE INTO DISTINCT ROWS BY [V1, T_ID, K3]\n" + "CLIENT MERGE SORT",
+                        + "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"V1\", \"T_ID\", \"K3\"]\n" + "CLIENT MERGE SORT",
                 QueryUtil.getExplainPlan(rs));
             
             rs = conn1.createStatement().executeQuery(query);
@@ -499,7 +499,7 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL " + numRegions + "-WAY RANGE SCAN OVER "
                         + MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME)+" [-32768,*] - [-32768,'z']\n"
                         + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                        + "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [V1]\nCLIENT MERGE SORT",
+                        + "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [\"V1\"]\nCLIENT MERGE SORT",
                 QueryUtil.getExplainPlan(rs));
             
             PhoenixStatement stmt = conn1.createStatement().unwrap(PhoenixStatement.class);
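
The test updates above all encode the same convention: after this change, every column reference in an EXPLAIN plan is rendered in double quotes (for example ["V1"] rather than [V1]). A minimal sketch of how these tests obtain the plan string, assuming a local Phoenix instance and a hypothetical ITEM table; QueryUtil.getExplainPlan is the helper the diffs themselves use:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

import org.apache.phoenix.util.QueryUtil;

public class ExplainPlanSketch {
    public static void main(String[] args) throws Exception {
        // The connection URL and table are placeholders for illustration.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            ResultSet rs = conn.createStatement()
                    .executeQuery("EXPLAIN SELECT name FROM item ORDER BY name");
            // Post-change, column references in the plan are quoted,
            // e.g. SERVER SORTED BY ["NAME"] instead of SERVER SORTED BY [NAME].
            System.out.println(QueryUtil.getExplainPlan(rs));
        }
    }
}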

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2730e874/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java
index 4c66dd7..6f2cbfa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java
@@ -34,8 +34,8 @@ import org.apache.phoenix.parse.TableWildcardParseNode;
 import org.apache.phoenix.parse.WildcardParseNode;
 import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.IndexUtil;
 
 public class IndexStatementRewriter extends ParseNodeRewriter {
@@ -107,8 +107,7 @@ public class IndexStatementRewriter extends ParseNodeRewriter {
             return node;
 
         String indexColName = IndexUtil.getIndexColumnName(dataCol);
-        // FIXME: why isn't this always case sensitive?
-        ParseNode indexColNode = new ColumnParseNode(tName, node.isCaseSensitive() ? '"' + indexColName + '"' : indexColName, node.getAlias());
+        ParseNode indexColNode = new ColumnParseNode(tName, '"' + indexColName + '"', node.getAlias());
         PDataType indexColType = IndexUtil.getIndexColumnDataType(dataCol);
         PDataType dataColType = dataColRef.getColumn().getDataType();
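
The hunk above removes the case-sensitivity branch: the rewritten index column reference is now always wrapped in double quotes. A standalone illustration of why, assuming the family:column format that IndexUtil.getIndexColumnName produces (plain Java, no Phoenix dependency; names here are hypothetical):

public class IndexColumnQuotingSketch {
    // Mirrors the assumed name format: column family, ':' separator, column name.
    static String toIndexColumnName(String family, String column) {
        return family + ":" + column;                // e.g. "0" + ":" + "NAME" -> 0:NAME
    }

    public static void main(String[] args) {
        String indexColName = toIndexColumnName("0", "NAME");
        // An unquoted reference would be normalized as an ordinary identifier
        // and would choke on the ':' separator, so the rewriter always emits a
        // case-sensitive, quoted name.
        String reference = '"' + indexColName + '"';
        System.out.println(reference);               // prints "0:NAME"
    }
}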
 


[40/50] [abbrv] phoenix git commit: Revert "Surface partial saves in CommitException (PHOENIX-900) from https://github.com/apache/phoenix/pull/37"

Posted by ma...@apache.org.
Revert "Surface partial saves in CommitExcepiton (PHOENIX-900) from https://github.com/apache/phoenix/pull/37"

This reverts commit fa58c7821a2e8fce30a8c0ff6e42aa00134dbce0.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/569469a4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/569469a4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/569469a4

Branch: refs/heads/calcite
Commit: 569469a46bae57cc4d6cbbcd7e01d535560f07e2
Parents: fa58c78
Author: Eli Levine <el...@apache.org>
Authored: Fri Feb 27 11:15:28 2015 -0800
Committer: Eli Levine <el...@apache.org>
Committed: Fri Feb 27 11:15:28 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/execute/PartialCommitIT.java | 302 -------------------
 .../apache/phoenix/compile/DeleteCompiler.java  |  13 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |  13 +-
 .../apache/phoenix/execute/CommitException.java |  35 +--
 .../apache/phoenix/execute/MutationState.java   | 156 ++++------
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  37 +--
 .../phoenix/jdbc/PhoenixPreparedStatement.java  |   7 +-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |   3 -
 .../phoenix/execute/MutationStateTest.java      |  64 ----
 .../java/org/apache/phoenix/query/BaseTest.java |   2 +-
 10 files changed, 89 insertions(+), 543 deletions(-)
----------------------------------------------------------------------
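
In data-structure terms, the revert replaces the per-row RowMutationState wrapper (which tracked the statement indexes used for partial-commit reporting) with the original plain column-value map. A compile-only sketch of the resulting shape, with type names taken from the diffs below (this class is illustrative, not Phoenix source):

import java.util.Map;

import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.schema.PColumn;

public class MutationShapeSketch {
    // Pre-revert shape (shown as a comment because RowMutationState is
    // removed by this revert):
    //   Map<ImmutableBytesPtr, MutationState.RowMutationState>
    //
    // Post-revert shape: each row key maps directly to its column values.
    Map<ImmutableBytesPtr, Map<PColumn, byte[]>> perRowColumnValues;
}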


http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
deleted file mode 100644
index 550d7de..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright 2014 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.execute;
-
-import static com.google.common.collect.Lists.newArrayList;
-import static com.google.common.collect.Sets.newHashSet;
-import static java.util.Collections.singletonList;
-import static org.apache.phoenix.query.BaseTest.initAndRegisterDriver;
-import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
-import static org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
-import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.Driver;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.TableRef;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class PartialCommitIT {
-    
-    private static final String TABLE_NAME_TO_FAIL = "b_failure_table".toUpperCase();
-    private static final byte[] ROW_TO_FAIL = Bytes.toBytes("fail me");
-    private static final String UPSERT_TO_FAIL = "upsert into " + TABLE_NAME_TO_FAIL + " values ('" + Bytes.toString(ROW_TO_FAIL) + "', 'boom!')";
-    private static final String UPSERT_SELECT_TO_FAIL = "upsert into " + TABLE_NAME_TO_FAIL + " select k, c from a_success_table";
-    private static final String DELETE_TO_FAIL = "delete from " + TABLE_NAME_TO_FAIL + "  where k='z'";
-    private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-    private static String url;
-    private static Driver driver;
-    private static final Properties props = new Properties();
-    
-    static {
-        props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 10);
-    }
-    
-    @BeforeClass
-    public static void setupCluster() throws Exception {
-      Configuration conf = TEST_UTIL.getConfiguration();
-      setUpConfigForMiniCluster(conf);
-      conf.setClass("hbase.coprocessor.region.classes", FailingRegionObserver.class, RegionObserver.class);
-      conf.setBoolean("hbase.coprocessor.abortonerror", false);
-      conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-      TEST_UTIL.startMiniCluster();
-      String clientPort = TEST_UTIL.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
-      url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
-              + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
-
-      Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
-      // Must update config before starting server
-      props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-      driver = initAndRegisterDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
-      createTablesWithABitOfData();
-    }
-    
-    private static void createTablesWithABitOfData() throws Exception {
-        Properties props = new Properties();
-        props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 10);
-
-        try (Connection con = driver.connect(url, new Properties())) {
-            Statement sta = con.createStatement();
-            sta.execute("create table a_success_table (k varchar primary key, c varchar)");
-            sta.execute("create table b_failure_table (k varchar primary key, c varchar)");
-            sta.execute("create table c_success_table (k varchar primary key, c varchar)");
-            con.commit();
-        }
-
-        props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 100);
-
-        try (Connection con = driver.connect(url, new Properties())) {
-            con.setAutoCommit(false);
-            Statement sta = con.createStatement();
-            for (String table : newHashSet("a_success_table", TABLE_NAME_TO_FAIL, "c_success_table")) {
-                sta.execute("upsert into " + table + " values ('z', 'z')");
-                sta.execute("upsert into " + table + " values ('zz', 'zz')");
-                sta.execute("upsert into " + table + " values ('zzz', 'zzz')");
-            }
-            con.commit();
-        }
-    }
-    
-    @AfterClass
-    public static void teardownCluster() throws Exception {
-      TEST_UTIL.shutdownMiniCluster();
-    }
-    
-    @Test
-    public void testNoFailure() {
-        testPartialCommit(singletonList("upsert into a_success_table values ('testNoFailure', 'a')"), 0, new int[0], false,
-                                        singletonList("select count(*) from a_success_table where k='testNoFailure'"), singletonList(new Integer(1)));
-    }
-    
-    @Test
-    public void testUpsertFailure() {
-        testPartialCommit(newArrayList("upsert into a_success_table values ('testUpsertFailure1', 'a')", 
-                                       UPSERT_TO_FAIL, 
-                                       "upsert into a_success_table values ('testUpsertFailure2', 'b')"), 
-                                       1, new int[]{1}, true,
-                                       newArrayList("select count(*) from a_success_table where k like 'testUpsertFailure_'",
-                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = '" + Bytes.toString(ROW_TO_FAIL) + "'"), 
-                                       newArrayList(new Integer(2), new Integer(0)));
-    }
-    
-    @Test
-    public void testUpsertSelectFailure() throws SQLException {
-        props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 100);
-
-        try (Connection con = driver.connect(url, new Properties())) {
-            con.createStatement().execute("upsert into a_success_table values ('" + Bytes.toString(ROW_TO_FAIL) + "', 'boom!')");
-            con.commit();
-        }
-        
-        testPartialCommit(newArrayList("upsert into a_success_table values ('testUpsertSelectFailure', 'a')", 
-                                       UPSERT_SELECT_TO_FAIL), 
-                                       1, new int[]{1}, true,
-                                       newArrayList("select count(*) from a_success_table where k in ('testUpsertSelectFailure', '" + Bytes.toString(ROW_TO_FAIL) + "')",
-                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = '" + Bytes.toString(ROW_TO_FAIL) + "'"), 
-                                       newArrayList(new Integer(2), new Integer(0)));
-    }
-    
-    @Test
-    public void testDeleteFailure() {
-        testPartialCommit(newArrayList("upsert into a_success_table values ('testDeleteFailure1', 'a')", 
-                                       DELETE_TO_FAIL,
-                                       "upsert into a_success_table values ('testDeleteFailure2', 'b')"), 
-                                       1, new int[]{1}, true,
-                                       newArrayList("select count(*) from a_success_table where k like 'testDeleteFailure_'",
-                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = 'z'"), 
-                                       newArrayList(new Integer(2), new Integer(1)));
-    }
-    
-    /**
-     * {@link MutationState} keeps mutations ordered lexicographically by table name.
-     */
-    @Test
-    public void testOrderOfMutationsIsPredicatable() {
-        testPartialCommit(newArrayList("upsert into c_success_table values ('testOrderOfMutationsIsPredicatable', 'c')", // will fail because c_success_table is after b_failure_table by table sort order
-                                       UPSERT_TO_FAIL, 
-                                       "upsert into a_success_table values ('testOrderOfMutationsIsPredicatable', 'a')"), // will succeed because a_success_table is before b_failure_table by table sort order
-                                       2, new int[]{0,1}, true,
-                                       newArrayList("select count(*) from c_success_table where k='testOrderOfMutationsIsPredicatable'",
-                                                    "select count(*) from a_success_table where k='testOrderOfMutationsIsPredicatable'",
-                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = '" + Bytes.toString(ROW_TO_FAIL) + "'"), 
-                                       newArrayList(new Integer(0), new Integer(1), new Integer(0)));
-    }
-    
-    @Test
-    public void checkThatAllStatementTypesMaintainOrderInConnection() {
-        testPartialCommit(newArrayList("upsert into a_success_table values ('k', 'checkThatAllStatementTypesMaintainOrderInConnection')", 
-                                       "upsert into a_success_table select k, c from c_success_table",
-                                       DELETE_TO_FAIL,
-                                       "select * from a_success_table", 
-                                       UPSERT_TO_FAIL), 
-                                       2, new int[]{2,4}, true,
-                                       newArrayList("select count(*) from a_success_table where k='testOrderOfMutationsIsPredicatable' or k like 'z%'", // rows left: zz, zzz, checkThatAllStatementTypesMaintainOrderInConnection
-                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = '" + ROW_TO_FAIL + "'",
-                                                    "select count(*) from " + TABLE_NAME_TO_FAIL + " where k = 'z'"), 
-                                       newArrayList(new Integer(4), new Integer(0), new Integer(1)));
-    }
-    
-    private void testPartialCommit(List<String> statements, int failureCount, int[] expectedUncommittedStatementIndexes, boolean willFail,
-                                   List<String> countStatementsForVerification, List<Integer> expectedCountsForVerification) {
-        Preconditions.checkArgument(countStatementsForVerification.size() == expectedCountsForVerification.size());
-        
-        try (Connection con = getConnectionWithTableOrderPreservingMutationState()) {
-            con.setAutoCommit(false);
-            Statement sta = con.createStatement();
-            for (String statement : statements) {
-                sta.execute(statement);
-            }
-            try {
-                con.commit();
-                if (willFail) {
-                    fail("Expected at least one statement in the list to fail");
-                } else {
-                    assertEquals(0, con.unwrap(PhoenixConnection.class).getStatementExecutionCounter()); // should have been reset to 0 in commit()
-                }
-            } catch (SQLException sqle) {
-                if (!willFail) {
-                    fail("Expected no statements to fail");
-                }
-                assertEquals(CommitException.class, sqle.getClass());
-                int[] uncommittedStatementIndexes = ((CommitException)sqle).getUncommittedStatementIndexes();
-                assertEquals(failureCount, uncommittedStatementIndexes.length);
-                assertArrayEquals(expectedUncommittedStatementIndexes, uncommittedStatementIndexes);
-            }
-            
-            // verify data in HBase
-            for (int i = 0; i < countStatementsForVerification.size(); i++) {
-                String countStatement = countStatementsForVerification.get(i);
-                ResultSet rs = sta.executeQuery(countStatement);
-                if (!rs.next()) {
-                    fail("Expected a single row from count query");
-                }
-                assertEquals(expectedCountsForVerification.get(i).intValue(), rs.getInt(1));
-            }
-        } catch (SQLException e) {
-            fail(e.toString());
-        }
-    }
-    
-    private PhoenixConnection getConnectionWithTableOrderPreservingMutationState() throws SQLException {
-        Connection con = driver.connect(url, new Properties());
-        PhoenixConnection phxCon = new PhoenixConnection(con.unwrap(PhoenixConnection.class));
-        final Map<TableRef,Map<ImmutableBytesPtr,MutationState.RowMutationState>> mutations = Maps.newTreeMap(new TableRefComparator());
-        return new PhoenixConnection(phxCon) {
-            protected MutationState newMutationState(int maxSize) {
-                return new MutationState(maxSize, this, mutations);
-            };
-        };
-    }
-    
-    public static class FailingRegionObserver extends SimpleRegionObserver {
-        @Override
-        public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
-                final Durability durability) throws HBaseIOException {
-            if (shouldFailUpsert(c, put) || shouldFailDelete(c, put)) {
-                // throwing anything other than instances of IOException result
-                // in this coprocessor being unloaded
-                // DoNotRetryIOException tells HBase not to retry this mutation
-                // multiple times
-                throw new DoNotRetryIOException();
-            }
-        }
-        
-        private static boolean shouldFailUpsert(ObserverContext<RegionCoprocessorEnvironment> c, Put put) {
-            String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
-            return TABLE_NAME_TO_FAIL.equals(tableName) && Bytes.equals(ROW_TO_FAIL, put.getRow());
-        }
-        
-        private static boolean shouldFailDelete(ObserverContext<RegionCoprocessorEnvironment> c, Put put) {
-            String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
-            return TABLE_NAME_TO_FAIL.equals(tableName) &&  
-                   // Phoenix deletes are sent as Puts with empty values
-                   put.getFamilyCellMap().firstEntry().getValue().get(0).getValueLength() == 0; 
-        }
-    }
-    
-    /**
-     * Used for ordering {@link MutationState#mutations} map.
-     */
-    private static class TableRefComparator implements Comparator<TableRef> {
-        @Override
-        public int compare(TableRef tr1, TableRef tr2) {
-            return tr1.getTable().getPhysicalName().getString().compareTo(tr2.getTable().getPhysicalName().getString());
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 6f51a4c..322d24a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -39,7 +39,6 @@ import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.BaseQueryPlan;
 import org.apache.phoenix.execute.MutationState;
-import org.apache.phoenix.execute.MutationState.RowMutationState;
 import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.IndexMetaDataCacheClient;
@@ -107,8 +106,8 @@ public class DeleteCompiler {
         ConnectionQueryServices services = connection.getQueryServices();
         final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
         final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
-        Map<ImmutableBytesPtr,RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
-        Map<ImmutableBytesPtr,RowMutationState> indexMutations = null;
+        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutations = Maps.newHashMapWithExpectedSize(batchSize);
+        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> indexMutations = null;
         // If indexTableRef is set, we're deleting the rows from both the index table and
         // the data table through a single query to save executing an additional one.
         if (indexTableRef != null) {
@@ -148,11 +147,11 @@ public class DeleteCompiler {
                     }
                     table.newKey(ptr, values);
                 }
-                mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter()));
+                mutations.put(ptr, PRow.DELETE_MARKER);
                 if (indexTableRef != null) {
                     ImmutableBytesPtr indexPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
                     rs.getCurrentRow().getKey(indexPtr);
-                    indexMutations.put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter()));
+                    indexMutations.put(indexPtr, PRow.DELETE_MARKER);
                 }
                 if (mutations.size() > maxSize) {
                     throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
@@ -430,9 +429,9 @@ public class DeleteCompiler {
                         // keys for our ranges
                         ScanRanges ranges = context.getScanRanges();
                         Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator(); 
-                        Map<ImmutableBytesPtr,RowMutationState> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
+                        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
                         while (iterator.hasNext()) {
-                            mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter()));
+                            mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), PRow.DELETE_MARKER);
                         }
                         return new MutationState(tableRef, mutation, 0, maxSize, connection);
                     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index f172814..b21cc2f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -42,7 +42,6 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
-import org.apache.phoenix.execute.MutationState.RowMutationState;
 import org.apache.phoenix.expression.Determinism;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
@@ -96,7 +95,7 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 public class UpsertCompiler {
-    private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes, PTable table, Map<ImmutableBytesPtr,RowMutationState> mutation, PhoenixStatement statement) {
+    private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes, PTable table, Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation) {
         Map<PColumn,byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
         byte[][] pkValues = new byte[table.getPKColumns().size()][];
         // If the table uses salting, the first byte is the salting byte, set to an empty array
@@ -115,7 +114,7 @@ public class UpsertCompiler {
         }
         ImmutableBytesPtr ptr = new ImmutableBytesPtr();
         table.newKey(ptr, pkValues);
-        mutation.put(ptr, new RowMutationState(columnValues, statement.getConnection().getStatementExecutionCounter()));
+        mutation.put(ptr, columnValues);
     }
 
     private static MutationState upsertSelect(PhoenixStatement statement, 
@@ -129,7 +128,7 @@ public class UpsertCompiler {
             boolean isAutoCommit = connection.getAutoCommit();
             byte[][] values = new byte[columnIndexes.length][];
             int rowCount = 0;
-            Map<ImmutableBytesPtr,RowMutationState> mutation = Maps.newHashMapWithExpectedSize(batchSize);
+            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(batchSize);
             PTable table = tableRef.getTable();
             ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
             ImmutableBytesWritable ptr = new ImmutableBytesWritable();
@@ -157,7 +156,7 @@ public class UpsertCompiler {
                             column.getMaxLength(), column.getScale(), column.getSortOrder());
                     values[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                 }
-                setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement);
+                setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
                 rowCount++;
                 // Commit a batch if auto commit is true and we're at our batch size
                 if (isAutoCommit && rowCount % batchSize == 0) {
@@ -803,8 +802,8 @@ public class UpsertCompiler {
                         throw new IllegalStateException();
                     }
                 }
-                Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(1);
-                setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation, statement);
+                Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
+                setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
                 return new MutationState(tableRef, mutation, 0, maxSize, connection);
             }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java
index a9d8311..63bf6a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/CommitException.java
@@ -19,32 +19,23 @@ package org.apache.phoenix.execute;
 
 import java.sql.SQLException;
 
-import org.apache.phoenix.jdbc.PhoenixConnection;
-
 public class CommitException extends SQLException {
-    private static final long serialVersionUID = 2L;
-    private final int[] uncommittedStatementIndexes;
+    private static final long serialVersionUID = 1L;
+    private final MutationState uncommittedState;
+    private final MutationState committedState;
 
-    public CommitException(Exception e, int[] uncommittedStatementIndexes) {
+    public CommitException(Exception e, MutationState uncommittedState, MutationState committedState) {
         super(e);
-        this.uncommittedStatementIndexes = uncommittedStatementIndexes;
+        this.uncommittedState = uncommittedState;
+        this.committedState = committedState;
+    }
+
+    public MutationState getUncommittedState() {
+        return uncommittedState;
     }
 
-    /**
-     * Returns indexes of UPSERT and DELETE statements that have failed. Indexes returned
-     * correspond to each failed statement's order of creation within a {@link PhoenixConnection} up to
-     * commit/rollback.
-     * <p>
-     * Statements whose index is returned in this set correspond to one or more HBase mutations that have failed.
-     * <p>
-     * Statement indexes are maintained correctly for connections that mutate and query 
-     * <b>data</b> (DELETE, UPSERT and SELECT) only. Statement (and their subsequent failure) order
-     * is undefined for connections that execute metadata operations due to the fact that Phoenix rolls
-     * back connections after metadata mutations.
-     * 
-     * @see PhoenixConnection#getStatementExecutionCounter()
-     */
-    public int[] getUncommittedStatementIndexes() {
-    	return uncommittedStatementIndexes;
+    public MutationState getCommittedState() {
+        return committedState;
     }
+
 }
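
With the revert applied, a failed commit once again surfaces the uncommitted and committed MutationState objects rather than statement indexes. A hedged usage sketch (the connection URL and table T are placeholders; the accessor names match the restored class above):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import org.apache.phoenix.execute.CommitException;
import org.apache.phoenix.execute.MutationState;

public class CommitExceptionUsageSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.setAutoCommit(false);
            conn.createStatement().execute("UPSERT INTO T VALUES ('k', 'v')");
            try {
                conn.commit();
            } catch (CommitException e) {
                MutationState uncommitted = e.getUncommittedState(); // mutations not flushed
                MutationState committed = e.getCommittedState();     // mutations already flushed
                // Application-specific retry or reconciliation would go here.
            }
        }
    }
}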

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 8053f15..04626a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.execute;
 
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -62,11 +61,9 @@ import org.cloudera.htrace.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import com.sun.istack.NotNull;
 
 /**
  * 
@@ -81,32 +78,40 @@ public class MutationState implements SQLCloseable {
     private PhoenixConnection connection;
     private final long maxSize;
     private final ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
-    private final Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations;
+    private final Map<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> mutations = Maps.newHashMapWithExpectedSize(3); // TODO: Sizing?
     private long sizeOffset;
     private int numRows = 0;
 
-    MutationState(long maxSize, PhoenixConnection connection, Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations) {
-        this.maxSize = maxSize;
-        this.connection = connection;
-        this.mutations = mutations;
-    }
-
-    public MutationState(long maxSize, PhoenixConnection connection) {
+    public MutationState(int maxSize, PhoenixConnection connection) {
         this(maxSize,connection,0);
     }
     
-    public MutationState(long maxSize, PhoenixConnection connection, long sizeOffset) {
-        this(maxSize, connection, Maps.<TableRef, Map<ImmutableBytesPtr,RowMutationState>>newHashMapWithExpectedSize(connection.getMutateBatchSize()));
+    public MutationState(int maxSize, PhoenixConnection connection, long sizeOffset) {
+        this.maxSize = maxSize;
+        this.connection = connection;
         this.sizeOffset = sizeOffset;
     }
     
-    public MutationState(TableRef table, Map<ImmutableBytesPtr,RowMutationState> mutations, long sizeOffset, long maxSize, PhoenixConnection connection) {
-        this(maxSize, connection, sizeOffset);
+    public MutationState(TableRef table, Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutations, long sizeOffset, long maxSize, PhoenixConnection connection) {
+        this.maxSize = maxSize;
+        this.connection = connection;
         this.mutations.put(table, mutations);
+        this.sizeOffset = sizeOffset;
         this.numRows = mutations.size();
         throwIfTooBig();
     }
     
+    private MutationState(List<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> entries, long sizeOffset, long maxSize, PhoenixConnection connection) {
+        this.maxSize = maxSize;
+        this.connection = connection;
+        this.sizeOffset = sizeOffset;
+        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry : entries) {
+            numRows += entry.getValue().size();
+            this.mutations.put(entry.getKey(), entry.getValue());
+        }
+        throwIfTooBig();
+    }
+    
     private void throwIfTooBig() {
         if (numRows > maxSize) {
             // TODO: throw SQLException ?
@@ -129,28 +134,29 @@ public class MutationState implements SQLCloseable {
         }
         this.sizeOffset += newMutation.sizeOffset;
         // Merge newMutation with this one, keeping state from newMutation for any overlaps
-        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry : newMutation.mutations.entrySet()) {
+        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry : newMutation.mutations.entrySet()) {
             // Replace existing entries for the table with new entries
             TableRef tableRef = entry.getKey();
             PTable table = tableRef.getTable();
             boolean isIndex = table.getType() == PTableType.INDEX;
-            Map<ImmutableBytesPtr,RowMutationState> existingRows = this.mutations.put(tableRef, entry.getValue());
+            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> existingRows = this.mutations.put(tableRef, entry.getValue());
             if (existingRows != null) { // Rows for that table already exist
                 // Loop through new rows and replace existing with new
-                for (Map.Entry<ImmutableBytesPtr,RowMutationState> rowEntry : entry.getValue().entrySet()) {
+                for (Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>> rowEntry : entry.getValue().entrySet()) {
                     // Replace existing row with new row
-                	RowMutationState existingRowMutationState = existingRows.put(rowEntry.getKey(), rowEntry.getValue());
-                    if (existingRowMutationState != null) {
-                    	Map<PColumn,byte[]> existingValues = existingRowMutationState.getColumnValues();
+                    Map<PColumn,byte[]> existingValues = existingRows.put(rowEntry.getKey(), rowEntry.getValue());
+                    if (existingValues != null) {
                         if (existingValues != PRow.DELETE_MARKER) {
-                            Map<PColumn,byte[]> newRow = rowEntry.getValue().getColumnValues();
+                            Map<PColumn,byte[]> newRow = rowEntry.getValue();
                             // if new row is PRow.DELETE_MARKER, it means delete, and we don't need to merge it with existing row. 
                             if (newRow != PRow.DELETE_MARKER) {
-                                // Merge existing column values with new column values
-                                existingRowMutationState.join(rowEntry.getValue());
+                                // Replace existing column values with new column values
+                                for (Map.Entry<PColumn,byte[]> valueEntry : newRow.entrySet()) {
+                                    existingValues.put(valueEntry.getKey(), valueEntry.getValue());
+                                }
                                 // Now that the existing row has been merged with the new row, replace it back
-                                // again (since it was merged with the new one above).
-                                existingRows.put(rowEntry.getKey(), existingRowMutationState);
+                                // again (since it was replaced with the new one above).
+                                existingRows.put(rowEntry.getKey(), existingValues);
                             }
                         }
                     } else {
@@ -170,16 +176,16 @@ public class MutationState implements SQLCloseable {
         throwIfTooBig();
     }
     
-    private Iterator<Pair<byte[],List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, RowMutationState> values, long timestamp, boolean includeMutableIndexes) {
+    private Iterator<Pair<byte[],List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, Map<PColumn, byte[]>> values, long timestamp, boolean includeMutableIndexes) {
         final Iterator<PTable> indexes = // Only maintain tables with immutable rows through this client-side mechanism
                 (tableRef.getTable().isImmutableRows() || includeMutableIndexes) ? 
                         IndexMaintainer.nonDisabledIndexIterator(tableRef.getTable().getIndexes().iterator()) : 
                         Iterators.<PTable>emptyIterator();
         final List<Mutation> mutations = Lists.newArrayListWithExpectedSize(values.size());
         final List<Mutation> mutationsPertainingToIndex = indexes.hasNext() ? Lists.<Mutation>newArrayListWithExpectedSize(values.size()) : null;
-        Iterator<Map.Entry<ImmutableBytesPtr,RowMutationState>> iterator = values.entrySet().iterator();
+        Iterator<Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>>> iterator = values.entrySet().iterator();
         while (iterator.hasNext()) {
-            Map.Entry<ImmutableBytesPtr,RowMutationState> rowEntry = iterator.next();
+            Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>> rowEntry = iterator.next();
             ImmutableBytesPtr key = rowEntry.getKey();
             PRow row = tableRef.getTable().newRow(connection.getKeyValueBuilder(), timestamp, key);
             List<Mutation> rowMutations, rowMutationsPertainingToIndex;
@@ -191,7 +197,7 @@ public class MutationState implements SQLCloseable {
                 // delete rows).
                 rowMutationsPertainingToIndex = Collections.emptyList();
             } else {
-                for (Map.Entry<PColumn,byte[]> valueEntry : rowEntry.getValue().getColumnValues().entrySet()) {
+                for (Map.Entry<PColumn,byte[]> valueEntry : rowEntry.getValue().entrySet()) {
                     row.setValue(valueEntry.getKey(), valueEntry.getValue());
                 }
                 rowMutations = row.toRowMutations();
@@ -243,14 +249,14 @@ public class MutationState implements SQLCloseable {
     }
     
     public Iterator<Pair<byte[],List<Mutation>>> toMutations(final boolean includeMutableIndexes) {
-        final Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>>> iterator = this.mutations.entrySet().iterator();
+        final Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> iterator = this.mutations.entrySet().iterator();
         if (!iterator.hasNext()) {
             return Iterators.emptyIterator();
         }
         Long scn = connection.getSCN();
         final long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
         return new Iterator<Pair<byte[],List<Mutation>>>() {
-            private Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> current = iterator.next();
+            private Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> current = iterator.next();
             private Iterator<Pair<byte[],List<Mutation>>> innerIterator = init();
                     
             private Iterator<Pair<byte[],List<Mutation>>> init() {
@@ -291,7 +297,7 @@ public class MutationState implements SQLCloseable {
         Long scn = connection.getSCN();
         MetaDataClient client = new MetaDataClient(connection);
         long[] timeStamps = new long[this.mutations.size()];
-        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry : mutations.entrySet()) {
+        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry : mutations.entrySet()) {
             TableRef tableRef = entry.getKey();
             long serverTimeStamp = tableRef.getTimeStamp();
             PTable table = tableRef.getTable();
@@ -306,15 +312,12 @@ public class MutationState implements SQLCloseable {
                         // TODO: use bitset?
                         table = result.getTable();
                         PColumn[] columns = new PColumn[table.getColumns().size()];
-                        for (Map.Entry<ImmutableBytesPtr,RowMutationState> rowEntry : entry.getValue().entrySet()) {
-                        	RowMutationState valueEntry = rowEntry.getValue();
-                            if (valueEntry != null) {
-                            	Map<PColumn, byte[]> colValues = valueEntry.getColumnValues();
-                            	if (colValues != PRow.DELETE_MARKER) {
-	                                for (PColumn column : colValues.keySet()) {
-	                                    columns[column.getPosition()] = column;
-	                                }
-                            	}
+                        for (Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>> rowEntry : entry.getValue().entrySet()) {
+                            Map<PColumn,byte[]> valueEntry = rowEntry.getValue();
+                            if (valueEntry != PRow.DELETE_MARKER) {
+                                for (PColumn column : valueEntry.keySet()) {
+                                    columns[column.getPosition()] = column;
+                                }
                             }
                         }
                         for (PColumn column : columns) {
@@ -354,14 +357,15 @@ public class MutationState implements SQLCloseable {
         int i = 0;
         byte[] tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
         long[] serverTimeStamps = validate();
-        Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>>> iterator = this.mutations.entrySet().iterator();
+        Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> iterator = this.mutations.entrySet().iterator();
+        List<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> committedList = Lists.newArrayListWithCapacity(this.mutations.size());
 
         // add tracing for this operation
         TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables");
         Span span = trace.getSpan();
         while (iterator.hasNext()) {
-            Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry = iterator.next();
-            Map<ImmutableBytesPtr,RowMutationState> valuesMap = entry.getValue();
+            Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry = iterator.next();
+            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> valuesMap = entry.getValue();
             TableRef tableRef = entry.getKey();
             PTable table = tableRef.getTable();
             table.getIndexMaintainers(tempPtr, connection);
@@ -421,6 +425,7 @@ public class MutationState implements SQLCloseable {
                         child.stop();
                         shouldRetry = false;
                         if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Total time for batch call of  " + mutations.size() + " mutations into " + table.getName().getString() + ": " + (System.currentTimeMillis() - startTime) + " ms", connection));
+                        committedList.add(entry);
                     } catch (Exception e) {
                         SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
                         if (inferredE != null) {
@@ -441,7 +446,9 @@ public class MutationState implements SQLCloseable {
                             }
                             e = inferredE;
                         }
-                        sqlE = new CommitException(e, getUncommittedSattementIndexes());
+                        // Throw to client with both what was committed so far and what is left to be committed.
+                        // That way, client can either undo what was done or try again with what was not done.
+                        sqlE = new CommitException(e, this, new MutationState(committedList, this.sizeOffset, this.maxSize, this.connection));
                     } finally {
                         try {
                             hTable.close();
@@ -481,64 +488,7 @@ public class MutationState implements SQLCloseable {
         numRows = 0;
     }
     
-    private int[] getUncommittedSattementIndexes() {
-    	int[] result = new int[0];
-    	for (Map<ImmutableBytesPtr, RowMutationState> rowMutations : mutations.values()) {
-    		for (RowMutationState rowMutationState : rowMutations.values()) {
-    			result = joinSortedIntArrays(result, rowMutationState.getStatementIndexes());
-    		}
-    	}
-    	return result;
-    }
-    
     @Override
     public void close() throws SQLException {
     }
-    
-    public static int[] joinSortedIntArrays(int[] a, int[] b) {
-        int[] result = new int[a.length + b.length];
-        int i = 0, j = 0, k = 0, current;
-        while (i < a.length && j < b.length) {
-            current = a[i] < b[j] ? a[i++] : b[j++];
-            for ( ; i < a.length && a[i] == current; i++);
-            for ( ; j < b.length && b[j] == current; j++);
-            result[k++] = current;
-        }
-        while (i < a.length) {
-            for (current = a[i++] ; i < a.length && a[i] == current; i++);
-            result[k++] = current;
-        }
-        while (j < b.length) {
-            for (current = b[j++] ; j < b.length && b[j] == current; j++);
-            result[k++] = current;
-        }
-        return Arrays.copyOf(result, k);
-    }
-    
-    public static class RowMutationState {
-        private Map<PColumn,byte[]> columnValues;
-        private int[] statementIndexes;
-
-        public RowMutationState(@NotNull Map<PColumn,byte[]> columnValues, int statementIndex) {
-            Preconditions.checkNotNull(columnValues);
-
-            this.columnValues = columnValues;
-            this.statementIndexes = new int[] {statementIndex};
-        }
-
-        Map<PColumn, byte[]> getColumnValues() {
-            return columnValues;
-        }
-
-        int[] getStatementIndexes() {
-            return statementIndexes;
-        }
-        
-        void join(RowMutationState newRow) {
-            getColumnValues().putAll(newRow.getColumnValues());
-            statementIndexes = joinSortedIntArrays(statementIndexes, newRow.getStatementIndexes());
-        }
-        
-
-    }
 }
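
A standalone sketch of the overlap rule join() applies above, with plain
String keys standing in for PColumn (all values hypothetical): rows from the
newer state win, and within a surviving row the newer column values
overwrite the older ones key by key.

    import java.util.HashMap;
    import java.util.Map;

    public class MergeSketch {
        public static void main(String[] args) {
            Map<String, String> existing = new HashMap<>();
            existing.put("COL1", "old");
            existing.put("COL2", "keep");

            Map<String, String> incoming = new HashMap<>();
            incoming.put("COL1", "new");

            // Newer values replace older ones for overlapping keys, as in
            // the per-row merge performed by MutationState.join().
            for (Map.Entry<String, String> e : incoming.entrySet()) {
                existing.put(e.getKey(), e.getValue());
            }
            System.out.println(existing); // {COL1=new, COL2=keep}
        }
    }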

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index c9ac94a..de9e323 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
-import org.apache.phoenix.execute.CommitException;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.expression.function.FunctionArgumentType;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
@@ -122,21 +121,21 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     private final Properties info;
     private List<SQLCloseable> statements = new ArrayList<SQLCloseable>();
     private final Map<PDataType<?>, Format> formatters = new HashMap<>();
-    private MutationState mutationState;
+    private final MutationState mutationState;
     private final int mutateBatchSize;
     private final Long scn;
     private boolean isAutoCommit = false;
     private PMetaData metaData;
     private final PName tenantId;
-    private final String datePattern; 
+    private final String datePattern;
     private final String timePattern;
     private final String timestampPattern;
-    private int statementExecutionCounter;
+    
     private boolean isClosed = false;
     private Sampler<?> sampler;
     private boolean readOnly = false;
-    private Map<String, String> customTracingAnnotations = emptyMap();
-    
+    private Map<String, String> customTracingAnnotations = emptyMap(); 
+ 
     static {
         Tracing.addTraceMetricsSource();
     }
@@ -151,20 +150,17 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         this(connection.getQueryServices(), connection.getURL(), connection.getClientInfo(), connection.getMetaDataCache());
         this.isAutoCommit = connection.isAutoCommit;
         this.sampler = connection.sampler;
-        this.statementExecutionCounter = connection.statementExecutionCounter;
     }
     
     public PhoenixConnection(PhoenixConnection connection, long scn) throws SQLException {
         this(connection.getQueryServices(), connection, scn);
         this.sampler = connection.sampler;
-        this.statementExecutionCounter = connection.statementExecutionCounter;
     }
     
     public PhoenixConnection(ConnectionQueryServices services, PhoenixConnection connection, long scn) throws SQLException {
         this(services, connection.getURL(), newPropsWithSCN(scn,connection.getClientInfo()), connection.getMetaDataCache());
         this.isAutoCommit = connection.isAutoCommit;
         this.sampler = connection.sampler;
-        this.statementExecutionCounter = connection.statementExecutionCounter;
     }
     
     public PhoenixConnection(ConnectionQueryServices services, String url, Properties info, PMetaData metaData) throws SQLException {
@@ -237,7 +233,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
             }
             
         });
-        this.mutationState = newMutationState(maxSize);
+        this.mutationState = new MutationState(maxSize, this);
         this.services.addConnection(this);
 
         // setup tracing, if its enabled
@@ -365,10 +361,6 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         return metaData;
     }
 
-    protected MutationState newMutationState(int maxSize) {
-        return new MutationState(maxSize, this); 
-    }
-    
     public MutationState getMutationState() {
         return mutationState;
     }
@@ -434,7 +426,6 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
                 return null;
             }
         }, Tracing.withTracing(this, "committing mutations"));
-        statementExecutionCounter = 0;
     }
 
     @Override
@@ -635,7 +626,6 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     @Override
     public void rollback() throws SQLException {
         mutationState.rollback(this);
-        statementExecutionCounter = 0;
     }
 
     @Override
@@ -786,19 +776,4 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     public KeyValueBuilder getKeyValueBuilder() {
         return this.services.getKeyValueBuilder();
     }
-    
-    /**
-     * Used to track executions of {@link Statement}s and {@link PreparedStatement}s that were created from this connection before
-     * commit or rollback. 0-based. Used to associate partial save errors with SQL statements
-     * invoked by users.
-     * @see CommitException
-     * @see #incrementStatementExecutionCounter()
-     */
-    public int getStatementExecutionCounter() {
-		return statementExecutionCounter;
-	}
-    
-    public void incrementStatementExecutionCounter() {
-        statementExecutionCounter++;
-    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java
index a23484c..25be8c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java
@@ -54,8 +54,8 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.schema.ExecuteQueryNotApplicableException;
 import org.apache.phoenix.schema.ExecuteUpdateNotApplicableException;
-import org.apache.phoenix.schema.Sequence;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.Sequence;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.SQLCloseable;
 
@@ -79,7 +79,8 @@ public class PhoenixPreparedStatement extends PhoenixStatement implements Prepar
 
     private final String query;
 
-    public PhoenixPreparedStatement(PhoenixConnection connection, PhoenixStatementParser parser) throws SQLException, IOException {
+    public PhoenixPreparedStatement(PhoenixConnection connection, PhoenixStatementParser parser) throws SQLException,
+            IOException {
         super(connection);
         this.statement = parser.nextStatement(new ExecutableNodeFactory());
         if (this.statement == null) { throw new EOFException(); }
@@ -88,7 +89,7 @@ public class PhoenixPreparedStatement extends PhoenixStatement implements Prepar
         this.parameters = Arrays.asList(new Object[statement.getBindCount()]);
         Collections.fill(parameters, BindManager.UNBOUND_PARAMETER);
     }
-    
+
     public PhoenixPreparedStatement(PhoenixConnection connection, String query) throws SQLException {
         super(connection);
         this.query = query;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index c6d086a..4ca5bb5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -151,7 +151,6 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public class PhoenixStatement implements Statement, SQLCloseable, org.apache.phoenix.jdbc.Jdbc7Shim.Statement {
-	
     private static final Logger logger = LoggerFactory.getLogger(PhoenixStatement.class);
     
     public enum Operation {
@@ -244,7 +243,6 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
                         setLastResultSet(rs);
                         setLastUpdateCount(NO_UPDATE);
                         setLastUpdateOperation(stmt.getOperation());
-                        connection.incrementStatementExecutionCounter();
                         return rs;
                     } catch (RuntimeException e) {
                         // FIXME: Expression.evaluate does not throw SQLException
@@ -291,7 +289,6 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
                                 int lastUpdateCount = (int) Math.min(Integer.MAX_VALUE, state.getUpdateCount());
                                 setLastUpdateCount(lastUpdateCount);
                                 setLastUpdateOperation(stmt.getOperation());
-                                connection.incrementStatementExecutionCounter();
                                 return lastUpdateCount;
                             } catch (RuntimeException e) {
                                 // FIXME: Expression.evaluate does not throw SQLException

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
deleted file mode 100644
index 67c3353..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.execute;
-
-import static org.apache.phoenix.execute.MutationState.joinSortedIntArrays;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-
-import org.junit.Test;
-
-public class MutationStateTest {
-
-    @Test
-    public void testJoinIntArrays() {
-        // simple case
-        int[] a = new int[] {1};
-        int[] b = new int[] {2};
-        int[] result = joinSortedIntArrays(a, b);
-        
-        assertEquals(2, result.length);
-        assertArrayEquals(new int[] {1,2}, result);
-        
-        // empty arrays
-        a = new int[0];
-        b = new int[0];
-        result = joinSortedIntArrays(a, b);
-        
-        assertEquals(0, result.length);
-        assertArrayEquals(new int[] {}, result);
-        
-        // dupes between arrays
-        a = new int[] {1,2,3};
-        b = new int[] {1,2,4};
-        result = joinSortedIntArrays(a, b);
-        
-        assertEquals(4, result.length);
-        assertArrayEquals(new int[] {1,2,3,4}, result);
-        
-        // dupes within arrays
-        a = new int[] {1,2,2,3};
-        b = new int[] {1,2,4};
-        result = joinSortedIntArrays(a, b);
-        
-        assertEquals(4, result.length);
-        assertArrayEquals(new int[] {1,2,3,4}, result);
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/569469a4/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index b64eff8..9947440 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -655,7 +655,7 @@ public abstract class BaseTest {
      * Create a {@link PhoenixTestDriver} and register it.
      * @return an initialized and registered {@link PhoenixTestDriver} 
      */
-    public static PhoenixTestDriver initAndRegisterDriver(String url, ReadOnlyProps props) throws Exception {
+    protected static PhoenixTestDriver initAndRegisterDriver(String url, ReadOnlyProps props) throws Exception {
         PhoenixTestDriver newDriver = new PhoenixTestDriver(props);
         DriverManager.registerDriver(newDriver);
         Driver oldDriver = DriverManager.getDriver(url); 


[12/50] [abbrv] phoenix git commit: PHOENIX-1142 Improve CsvBulkLoadTool to parse different Date formats

Posted by ma...@apache.org.
PHOENIX-1142 Improve CsvBulkLoadTool to parse different Date formats


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2d770333
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2d770333
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2d770333

Branch: refs/heads/calcite
Commit: 2d7703339f87c73625d251fe3322f839cc1ee791
Parents: 8c340f5
Author: Jeffrey Zhong <je...@apache.org>
Authored: Mon Feb 2 22:33:04 2015 -0800
Committer: Jeffrey Zhong <je...@apache.org>
Committed: Fri Feb 6 14:08:15 2015 -0800

----------------------------------------------------------------------
 .../phoenix/end2end/ProductMetricsIT.java       |   7 +-
 .../phoenix/end2end/ToDateFunctionIT.java       |  15 ++
 .../phoenix/end2end/TruncateFunctionIT.java     |   5 +-
 .../apache/phoenix/end2end/UpsertValuesIT.java  |  63 ++++++--
 .../phoenix/end2end/VariableLengthPKIT.java     |   7 +-
 .../phoenix/mapreduce/CsvBulkLoadToolIT.java    |  21 ++-
 .../phoenix/expression/LiteralExpression.java   |   9 +-
 .../expression/function/ToDateFunction.java     |  17 +--
 .../apache/phoenix/parse/ToDateParseNode.java   |  10 +-
 .../org/apache/phoenix/schema/types/PDate.java  |   5 +-
 .../org/apache/phoenix/schema/types/PTime.java  |   2 +
 .../apache/phoenix/schema/types/PTimestamp.java |   2 +
 .../java/org/apache/phoenix/util/DateUtil.java  | 147 +++++++++++++------
 .../phoenix/util/csv/CsvUpsertExecutor.java     |  35 ++++-
 .../util/csv/StringToArrayConverter.java        |  24 +--
 .../phoenix/compile/WhereCompilerTest.java      |   3 +-
 .../org/apache/phoenix/util/DateUtilTest.java   |  28 ++--
 17 files changed, 265 insertions(+), 135 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
index cd436e5..975541e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
@@ -57,7 +57,6 @@ import com.google.common.collect.Ordering;
 
 
 public class ProductMetricsIT extends BaseClientManagedTimeIT {
-    private static Format format = DateUtil.getDateParser(DateUtil.DEFAULT_DATE_FORMAT);
     private static final String PRODUCT_METRICS_NAME = "PRODUCT_METRICS";
     private static final String PRODUCT_METRICS_SCHEMA_NAME = "";
     private static final String DS1 = "1970-01-01 00:58:00";
@@ -88,11 +87,7 @@ public class ProductMetricsIT extends BaseClientManagedTimeIT {
     }
     
     private static Date toDate(String dateString) {
-        try {
-            return (Date)format.parseObject(dateString);
-        } catch (ParseException e) {
-            throw new RuntimeException(e);
-        }
+        return DateUtil.parseDateTime(dateString);
     }
     
     private static void initTable(byte[][] splits, long ts) throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
index 19257c1..984e21b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
@@ -27,12 +27,14 @@ import java.sql.Statement;
 import java.util.Properties;
 
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.DateUtil;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 
 public class ToDateFunctionIT extends BaseHBaseManagedTimeIT {
@@ -69,6 +71,19 @@ public class ToDateFunctionIT extends BaseHBaseManagedTimeIT {
     public void testToDate_Default() throws SQLException {
         // Default time zone is GMT, so this is timestamp 0
         assertEquals(0L, callToDateFunction("TO_DATE('1970-01-01 00:00:00')").getTime());
+        assertEquals(0L, callToDateFunction("TO_DATE('1970-01-01 00:00:00.000')").getTime());
+        assertEquals(0L, callToDateFunction("TO_DATE('1970-01-01')").getTime());
+        assertEquals(0L, callToDateFunction("TO_DATE('1970/01/01','yyyy/MM/dd')").getTime());
+
+        // Test other ISO 8601 Date Compliant Formats to verify they can be parsed
+        try {
+            callToDateFunction("TO_DATE('2015-01-27T16:17:57+00:00')");
+            callToDateFunction("TO_DATE('2015-01-27T16:17:57Z')");
+            callToDateFunction("TO_DATE('2015-W05')");
+            callToDateFunction("TO_DATE('2015-W05-2')");
+        } catch (Exception ex) {
+            fail("TO_DATE Parse ISO8601 Time Failed due to:" + ex);
+        }
     }
 
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java
index 4cd263e..59c499d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateFunctionIT.java
@@ -42,17 +42,16 @@ import org.junit.Test;
 
 
 public class TruncateFunctionIT extends BaseClientManagedTimeIT {
-    private static Format format = DateUtil.getDateParser(DateUtil.DEFAULT_MS_DATE_FORMAT);
     private static final String DS1 = "1970-01-10 00:58:01.587";
     private static final String DS2 = "1970-01-20 01:02:45.906";
     private static final String DS3 = "1970-01-30 01:30:24.353";
     
     private static Date toDate(String s) throws ParseException {
-        return (Date) (format.parseObject(s));
+        return DateUtil.parseDateTime(s);
     }
     
     private static Timestamp toTimestamp(String s) throws ParseException {
-        return new Timestamp(((Date) (format.parseObject(s))).getTime());
+        return new Timestamp((DateUtil.parseDateTime(s)).getTime());
     }
     
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
index 7c3c073..b44fbff 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
@@ -34,8 +34,7 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Timestamp;
-import java.text.Format;
-import java.text.ParseException;
+import java.sql.Time;
 import java.util.Properties;
 
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -548,14 +547,8 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT {
         }
     }
     
-    private static Format DATE_FORMAT = DateUtil.getDateParser(DateUtil.DEFAULT_DATE_FORMAT);
-    
     private static Date toDate(String dateString) {
-        try {
-            return (Date)DATE_FORMAT.parseObject(dateString);
-        } catch (ParseException e) {
-            throw new RuntimeException(e);
-        }
+        return DateUtil.parseDateTime(dateString);
     }
     
     @Test
@@ -599,6 +592,56 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT {
              closeStmtAndConn(stmt, conn);
         }
     }
-        
+
+    @Test
+    public void testUpsertDateString() throws Exception {
+        long ts = nextTimestamp();
+        Properties props = new Properties();
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
+        Connection conn = null;
+        PreparedStatement stmt = null;
+        try {
+            conn = DriverManager.getConnection(getUrl(), props);
+            stmt = conn.prepareStatement("create table UpsertDateVal (k varchar, v date not null, t timestamp" +
+                    ", tt time constraint pk primary key (k,v desc))");
+            stmt.execute();
+        } finally {
+            closeStmtAndConn(stmt, conn);
+        }
+
+        String dateStr = "2013-01-01";
+        String timeStampStr = "2013-01-01 04:00:00.123456";
+        String timeStr = "2013-01-01 04:00:00";
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
+        try {
+            conn = DriverManager.getConnection(getUrl(), props);
+            stmt = conn.prepareStatement("upsert into UpsertDateVal(k,v,t,tt) values ('a', ?, ?, ?)");
+            stmt.setString(1, dateStr);
+            stmt.setString(2, timeStampStr);
+            stmt.setString(3, timeStr);
+            stmt.executeUpdate();
+            conn.commit();
+        } finally {
+            closeStmtAndConn(stmt, conn);
+        }
+
+        Date date = toDate(dateStr);
+        Timestamp timeStamp = new Timestamp(toDate(timeStampStr).getTime());
+        Time time = new Time(toDate(timeStr).getTime());
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 4));
+        try {
+            conn = DriverManager.getConnection(getUrl(), props);
+            stmt = conn.prepareStatement("select * from UpsertDateVal");
+            ResultSet rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals("a", rs.getString(1));
+            assertEquals(date, rs.getDate(2));
+            assertEquals(timeStamp, rs.getTimestamp(3));
+            assertEquals(time, rs.getTime(4));
+            assertFalse(rs.next());
+        } finally {
+            closeStmtAndConn(stmt, conn);
+        }
+    }
     
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
index e836fec..0d9aeb2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
@@ -51,16 +51,11 @@ import org.junit.Test;
 
 
 public class VariableLengthPKIT extends BaseClientManagedTimeIT {
-    private static Format format = DateUtil.getDateParser(DateUtil.DEFAULT_DATE_FORMAT);
     private static final String DS1 = "1970-01-01 00:58:00";
     private static final Date D1 = toDate(DS1);
 
     private static Date toDate(String dateString) {
-        try {
-            return (Date)format.parseObject(dateString);
-        } catch (ParseException e) {
-            throw new RuntimeException(e);
-        }
+        return DateUtil.parseDateTime(dateString);
     }
 
     protected static void initGroupByRowKeyColumns(long ts) throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
index 0501142..00968ae 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
@@ -17,6 +17,13 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.apache.phoenix.query.QueryServices.DATE_FORMAT_ATTRIB;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.PrintWriter;
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -30,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -89,30 +97,33 @@ public class CsvBulkLoadToolIT {
     public void testBasicImport() throws Exception {
 
         Statement stmt = conn.createStatement();
-        stmt.execute("CREATE TABLE TABLE1 (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR)");
+        stmt.execute("CREATE TABLE TABLE1 (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, T DATE)");
 
         FileSystem fs = FileSystem.get(hbaseTestUtil.getConfiguration());
         FSDataOutputStream outputStream = fs.create(new Path("/tmp/input1.csv"));
         PrintWriter printWriter = new PrintWriter(outputStream);
-        printWriter.println("1,Name 1");
-        printWriter.println("2,Name 2");
+        printWriter.println("1,Name 1,1970/01/01");
+        printWriter.println("2,Name 2,1970/01/02");
         printWriter.close();
 
         CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
-        csvBulkLoadTool.setConf(hbaseTestUtil.getConfiguration());
+        csvBulkLoadTool.setConf(new Configuration(hbaseTestUtil.getConfiguration()));
+        csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
         int exitCode = csvBulkLoadTool.run(new String[] {
                 "--input", "/tmp/input1.csv",
                 "--table", "table1",
                 "--zookeeper", zkQuorum});
         assertEquals(0, exitCode);
 
-        ResultSet rs = stmt.executeQuery("SELECT id, name FROM table1 ORDER BY id");
+        ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM table1 ORDER BY id");
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
         assertEquals("Name 1", rs.getString(2));
+        assertEquals(DateUtil.parseDateTime("1970-01-01"), rs.getDate(3));
         assertTrue(rs.next());
         assertEquals(2, rs.getInt(1));
         assertEquals("Name 2", rs.getString(2));
+        assertEquals(DateUtil.parseDateTime("1970-01-02"), rs.getDate(3));
         assertFalse(rs.next());
 
         rs.close();
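
A hedged sketch of driving the same override programmatically outside the
test harness, assuming DATE_FORMAT_ATTRIB resolves to the
phoenix.query.dateFormat property and a local ZooKeeper quorum:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.util.ToolRunner;
    import org.apache.phoenix.mapreduce.CsvBulkLoadTool;

    public class BulkLoadSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Same knob the test sets through DATE_FORMAT_ATTRIB:
            conf.set("phoenix.query.dateFormat", "yyyy/MM/dd");
            int exitCode = ToolRunner.run(conf, new CsvBulkLoadTool(), new String[] {
                    "--input", "/tmp/input1.csv",
                    "--table", "TABLE1",
                    "--zookeeper", "localhost:2181"});
            System.exit(exitCode);
        }
    }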

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index 757ba34..e2bdc82 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -28,6 +28,9 @@ import org.apache.phoenix.expression.visitor.ExpressionVisitor;
 import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PTime;
+import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.types.PhoenixArray;
 import org.apache.phoenix.schema.SortOrder;
@@ -160,7 +163,11 @@ public class LiteralExpression extends BaseTerminalExpression {
         PDataType actualType = PDataType.fromLiteral(value);
         // For array we should check individual element in it?
         // It would be costly though!!!!!
-        if (!actualType.isCoercibleTo(type, value)) {
+        // UpsertStatement can try to cast varchar to date type but PVarchar can't CoercibleTo Date or Timestamp
+        // otherwise TO_NUMBER like functions will fail
+        if (!actualType.isCoercibleTo(type, value) &&
+                (!actualType.equals(PVarchar.INSTANCE) ||
+                        !(type.equals(PDate.INSTANCE) || type.equals(PTimestamp.INSTANCE) || type.equals(PTime.INSTANCE)))) {
             throw TypeMismatchException.newException(type, actualType, value.toString());
         }
         value = type.toObject(value, actualType);
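
In practice the relaxed check above is what lets a date-typed column accept
a varchar literal directly; a small illustrative sketch (table name and
connection URL are hypothetical):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class UpsertDateLiteralSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                        "CREATE TABLE IF NOT EXISTS EVENT (ID INTEGER NOT NULL PRIMARY KEY, CREATED DATE)");
                // The varchar literal is coerced to DATE instead of failing
                // with a TypeMismatchException:
                try (PreparedStatement stmt = conn.prepareStatement(
                        "UPSERT INTO EVENT (ID, CREATED) VALUES (1, '2013-01-01')")) {
                    stmt.executeUpdate();
                }
                conn.commit();
            }
        }
    }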

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java
index 3e4cfae..73ca3ed 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java
@@ -23,6 +23,7 @@ import java.text.Format;
 import java.text.ParseException;
 import java.util.List;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.WritableUtils;
 
@@ -53,13 +54,13 @@ import org.apache.phoenix.util.DateUtil;
                 @Argument(allowedTypes={PVarchar.class}, isConstant=true, defaultValue = "null") } )
 public class ToDateFunction extends ScalarFunction {
     public static final String NAME = "TO_DATE";
-    private Format dateParser;
+    private DateUtil.DateTimeParser dateParser;
     private String dateFormat;
 
     public ToDateFunction() {
     }
 
-    public ToDateFunction(List<Expression> children, String dateFormat, Format dateParser) throws SQLException {
+    public ToDateFunction(List<Expression> children, String dateFormat, DateUtil.DateTimeParser dateParser) throws SQLException {
         super(children.subList(0, 1));
         this.dateFormat = dateFormat;
         this.dateParser = dateParser;
@@ -93,14 +94,10 @@ public class ToDateFunction extends ScalarFunction {
         }
         PDataType type = expression.getDataType();
         String dateStr = (String)type.toObject(ptr, expression.getSortOrder());
-        try {
-            Object value = dateParser.parseObject(dateStr);
-            byte[] byteValue = getDataType().toBytes(value);
-            ptr.set(byteValue);
-            return true;
-        } catch (ParseException e) {
-            throw new IllegalStateException("to_date('" + dateStr + ")' did not match expected date format of '" + dateFormat + "'.");
-        }
+        Object value = dateParser.parseDateTime(dateStr);
+        byte[] byteValue = getDataType().toBytes(value);
+        ptr.set(byteValue);
+        return true;
      }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java
index 46bca63..6140dbc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java
@@ -38,21 +38,15 @@ public class ToDateParseNode extends FunctionParseNode {
 
     @Override
     public FunctionExpression create(List<Expression> children, StatementContext context) throws SQLException {
-        Format dateParser;
         String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue();
         String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue();
-        TimeZone parserTimeZone = context.getDateFormatTimeZone();
         if (dateFormat == null) {
             dateFormat = context.getDateFormat();
         }
         if (timeZoneId == null) {
-            parserTimeZone = context.getDateFormatTimeZone();
-        } else if ("LOCAL".equalsIgnoreCase(timeZoneId)) {
-            parserTimeZone = TimeZone.getDefault();
-        } else {
-            parserTimeZone = TimeZone.getTimeZone(timeZoneId);
+            timeZoneId = context.getDateFormatTimeZone().getID();
         }
-        dateParser = DateUtil.getDateParser(dateFormat, parserTimeZone);
+        DateUtil.DateTimeParser dateParser = DateUtil.getDateParser(dateFormat, timeZoneId);
         return new ToDateFunction(children, dateFormat, dateParser);
     }
 }
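
A small sketch of the parser selection this now delegates to (values are
hypothetical, and the return type is assumed to follow the refactored
DateUtil.parseDateTime, which yields java.sql.Date):

    import java.sql.Date;
    import org.apache.phoenix.util.DateUtil;

    public class ParserSketch {
        public static void main(String[] args) {
            // "LOCAL" maps to TimeZone.getDefault(); any other id goes to
            // TimeZone.getTimeZone(), per DateUtil.getDateParser(pattern, id).
            DateUtil.DateTimeParser parser = DateUtil.getDateParser("yyyy/MM/dd", "GMT");
            Date d = parser.parseDateTime("1970/01/02");
            System.out.println(d.getTime()); // 86400000 ms, one day after epoch
        }
    }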

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
index 9ab2226..13a828f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
@@ -70,6 +70,8 @@ public class PDate extends PDataType<Date> {
       return new Date((Long) object);
     } else if (actualType == PDecimal.INSTANCE) {
       return new Date(((BigDecimal) object).longValueExact());
+    } else if (actualType == PVarchar.INSTANCE) {
+      return DateUtil.parseDateTime((String) object);
     }
     return throwConstraintViolationException(actualType, this);
   }
@@ -93,7 +95,8 @@ public class PDate extends PDataType<Date> {
 
   @Override
   public boolean isCastableTo(PDataType targetType) {
-    return super.isCastableTo(targetType) || equalsAny(targetType, PDecimal.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE);
+    return super.isCastableTo(targetType) ||
+            equalsAny(targetType, PDecimal.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
index 319f801..d824885 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTime.java
@@ -77,6 +77,8 @@ public class PTime extends PDataType<Time> {
       return new java.sql.Time((Long) object);
     } else if (actualType == PDecimal.INSTANCE) {
       return new java.sql.Time(((BigDecimal) object).longValueExact());
+    } else if (actualType == PVarchar.INSTANCE) {
+      return DateUtil.parseDateTime((String) object);
     }
     return throwConstraintViolationException(actualType, this);
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
index 2b95611..4bdcb86 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
@@ -83,6 +83,8 @@ public class PTimestamp extends PDataType<Timestamp> {
           (bd.remainder(BigDecimal.ONE).multiply(QueryConstants.BD_MILLIS_NANOS_CONVERSION))
               .intValue();
       return DateUtil.getTimestamp(ms, nanos);
+    } else if (actualType == PVarchar.INSTANCE) {
+      return new Timestamp(DateUtil.parseDateTime((String) object).getTime());
     }
     return throwConstraintViolationException(actualType, this);
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
index 8952708..659f45e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java
@@ -27,10 +27,8 @@ import java.text.SimpleDateFormat;
 import java.util.TimeZone;
 
 import org.apache.commons.lang.time.FastDateFormat;
-
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.IllegalDataException;
-import org.joda.time.DateTime;
 import org.joda.time.chrono.ISOChronology;
 import org.joda.time.format.DateTimeFormatter;
 import org.joda.time.format.DateTimeFormatterBuilder;
@@ -49,62 +47,58 @@ public class DateUtil {
     public static final Format DEFAULT_MS_DATE_FORMATTER = FastDateFormat.getInstance(
             DEFAULT_MS_DATE_FORMAT, TimeZone.getTimeZone(DEFAULT_TIME_ZONE_ID));
 
-    private static final DateTimeFormatter DATE_TIME_PARSER = new DateTimeFormatterBuilder()
+    private static final DateTimeFormatter ISO_DATE_TIME_PARSER = new DateTimeFormatterBuilder()
             .append(ISODateTimeFormat.dateParser())
             .appendOptional(new DateTimeFormatterBuilder()
-                    .appendLiteral(' ')
+                    .appendLiteral(' ').toParser())
+            .appendOptional(new DateTimeFormatterBuilder()
                     .append(ISODateTimeFormat.timeParser()).toParser())
             .toFormatter()
+            .withZoneUTC()
             .withChronology(ISOChronology.getInstanceUTC());
 
     private DateUtil() {
     }
 
-    public static Format getDateParser(String pattern, TimeZone timeZone) {
-        SimpleDateFormat format = new SimpleDateFormat(pattern) {
-            @Override
-            public java.util.Date parseObject(String source) throws ParseException {
-                java.util.Date date = super.parse(source);
-                return new java.sql.Date(date.getTime());
-            }
-        };
-        format.setTimeZone(timeZone);
-        return format;
+    public static DateTimeParser getDateParser(String pattern, TimeZone timeZone) {
+        if (DateUtil.DEFAULT_DATE_FORMAT.equals(pattern) &&
+                timeZone.getID().equalsIgnoreCase(DateUtil.DEFAULT_TIME_ZONE_ID)) {
+            return ISODateFormatParser.getInstance();
+        } else {
+            return new SimpleDateFormatParser(pattern, timeZone);
+        }
+    }
+
+    public static DateTimeParser getDateParser(String pattern, String timeZoneId) {
+        if (timeZoneId == null) {
+            timeZoneId = DateUtil.DEFAULT_TIME_ZONE_ID;
+        }
+        TimeZone parserTimeZone;
+        if ("LOCAL".equalsIgnoreCase(timeZoneId)) {
+            parserTimeZone = TimeZone.getDefault();
+        } else {
+            parserTimeZone = TimeZone.getTimeZone(timeZoneId);
+        }
+        return getDateParser(pattern, parserTimeZone);
     }
 
-    public static Format getDateParser(String pattern) {
+    public static DateTimeParser getDateParser(String pattern) {
         return getDateParser(pattern, DEFAULT_TIME_ZONE);
     }
 
-    public static Format getTimeParser(String pattern, TimeZone timeZone) {
-        SimpleDateFormat format = new SimpleDateFormat(pattern) {
-            @Override
-            public java.util.Date parseObject(String source) throws ParseException {
-                java.util.Date date = super.parse(source);
-                return new java.sql.Time(date.getTime());
-            }
-        };
-        format.setTimeZone(timeZone);
-        return format;
+    public static DateTimeParser getTimeParser(String pattern, TimeZone timeZone) {
+        return getDateParser(pattern, timeZone);
     }
 
-    public static Format getTimeParser(String pattern) {
+    public static DateTimeParser getTimeParser(String pattern) {
         return getTimeParser(pattern, DEFAULT_TIME_ZONE);
     }
 
-    public static Format getTimestampParser(String pattern, TimeZone timeZone) {
-        SimpleDateFormat format = new SimpleDateFormat(pattern) {
-            @Override
-            public java.util.Date parseObject(String source) throws ParseException {
-                java.util.Date date = super.parse(source);
-                return new java.sql.Timestamp(date.getTime());
-            }
-        };
-        format.setTimeZone(timeZone);
-        return format;
+    public static DateTimeParser getTimestampParser(String pattern, TimeZone timeZone) {
+        return getDateParser(pattern, timeZone);
     }
 
-    public static Format getTimestampParser(String pattern) {
+    public static DateTimeParser getTimestampParser(String pattern) {
         return getTimestampParser(pattern, DEFAULT_TIME_ZONE);
     }
 
@@ -114,24 +108,20 @@ public class DateUtil {
                 : FastDateFormat.getInstance(pattern, DateUtil.DEFAULT_TIME_ZONE);
     }
 
-    private static DateTime parseDateTime(String dateTimeValue) {
-        try {
-            return DATE_TIME_PARSER.parseDateTime(dateTimeValue);
-        } catch (IllegalArgumentException e) {
-            throw new IllegalDataException(e);
-        }
+    public static Date parseDateTime(String dateTimeValue) {
+        return ISODateFormatParser.getInstance().parseDateTime(dateTimeValue);
     }
 
     public static Date parseDate(String dateValue) {
-        return new Date(parseDateTime(dateValue).getMillis());
+        return parseDateTime(dateValue);
     }
 
     public static Time parseTime(String timeValue) {
-        return new Time(parseDateTime(timeValue).getMillis());
+        return new Time(parseDateTime(timeValue).getTime());
     }
 
     public static Timestamp parseTimestamp(String timestampValue) {
-        return new Timestamp(parseDateTime(timestampValue).getMillis());
+        return new Timestamp(parseDateTime(timestampValue).getTime());
     }
 
     /**
@@ -153,4 +143,69 @@ public class DateUtil {
     public static Timestamp getTimestamp(BigDecimal bd) {
         return DateUtil.getTimestamp(bd.longValue(), ((bd.remainder(BigDecimal.ONE).multiply(BigDecimal.valueOf(QueryConstants.MILLIS_TO_NANOS_CONVERTOR))).intValue()));
     }
+
+    public static interface DateTimeParser {
+        public Date parseDateTime(String dateTimeString) throws IllegalDataException;
+    }
+
+    /**
+     * This class is used when a user explicitly provides phoenix.query.dateFormat in configuration
+     */
+    private static class SimpleDateFormatParser implements DateTimeParser {
+        private String datePattern;
+        private SimpleDateFormat parser;
+
+        public SimpleDateFormatParser(String pattern) {
+            this(pattern, DEFAULT_TIME_ZONE);
+        }
+
+        public SimpleDateFormatParser(String pattern, TimeZone timeZone) {
+            datePattern = pattern;
+            parser = new SimpleDateFormat(pattern) {
+                @Override
+                public java.util.Date parseObject(String source) throws ParseException {
+                    java.util.Date date = super.parse(source);
+                    return new java.sql.Date(date.getTime());
+                }
+            };
+            parser.setTimeZone(timeZone);
+        }
+
+        public Date parseDateTime(String dateTimeString) throws IllegalDataException {
+            try {
+                java.util.Date date = parser.parse(dateTimeString);
+                return new java.sql.Date(date.getTime());
+            } catch (ParseException e) {
+                throw new IllegalDataException("to_date('" + dateTimeString + "') did not match expected date format of '" + datePattern + "'.");
+            }
+        }
+    }
+
+    /**
+     * This class is our default DateTime string parser
+     */
+    private static class ISODateFormatParser implements DateTimeParser {
+        // volatile is required for double-checked locking to be safe.
+        private static volatile ISODateFormatParser inst = null;
+        private static final Object lock = new Object();
+        private ISODateFormatParser() {}
+
+        public static ISODateFormatParser getInstance() {
+            if (inst != null) return inst;
+
+            synchronized (lock) {
+                if (inst == null) {
+                    inst = new ISODateFormatParser();
+                }
+            }
+            return inst;
+        }
+
+        public Date parseDateTime(String dateTimeString) throws IllegalDataException {
+            try {
+                return new Date(ISO_DATE_TIME_PARSER.parseDateTime(dateTimeString).getMillis());
+            } catch (IllegalArgumentException ex) {
+                throw new IllegalDataException(ex);
+            }
+        }
+    }
 }

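Taken together, the change replaces the java.text.Format-based parsers with a
small DateTimeParser abstraction: the default ISO pattern in GMT is served by a
shared singleton, while any custom pattern or time zone gets its own
SimpleDateFormat-backed instance. A short usage sketch (input strings are
hypothetical):

    // Default pattern and zone -> shared ISODateFormatParser.
    DateUtil.DateTimeParser iso =
            DateUtil.getDateParser(DateUtil.DEFAULT_DATE_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID);
    java.sql.Date d1 = iso.parseDateTime("2015-01-28 20:30:33");

    // Custom pattern or zone -> per-call SimpleDateFormatParser.
    DateUtil.DateTimeParser custom =
            DateUtil.getDateParser("dd/MM/yyyy", TimeZone.getTimeZone("GMT+1"));
    java.sql.Date d2 = custom.parseDateTime("28/01/2015");
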
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index d0f9c24..731a13f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -20,9 +20,16 @@ package org.apache.phoenix.util.csv;
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 import org.apache.commons.csv.CSVRecord;
+import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PTime;
+import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.ColumnInfo;
+import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -31,9 +38,12 @@ import javax.annotation.Nullable;
 import java.io.Closeable;
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.Date;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.util.List;
+import java.util.Properties;
+import java.util.TimeZone;
 
 /**
  * Executes upsert statements on a provided {@code PreparedStatement} based on incoming CSV records, notifying a
@@ -175,24 +185,43 @@ public class CsvUpsertExecutor implements Closeable {
                             arrayElementSeparator,
                             PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE)));
         } else {
-            return new SimpleDatatypeConversionFunction(dataType);
+            return new SimpleDatatypeConversionFunction(dataType, this.conn);
         }
     }
 
     /**
      * Performs typed conversion from String values to a given column value type.
      */
-    private static class SimpleDatatypeConversionFunction implements Function<String, Object> {
+    static class SimpleDatatypeConversionFunction implements Function<String, Object> {
 
         private final PDataType dataType;
+        private final DateUtil.DateTimeParser dateTimeParser;
 
-        private SimpleDatatypeConversionFunction(PDataType dataType) {
+        SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
+            Properties props = null;
+            try {
+                props = conn.getClientInfo();
+            } catch (SQLException e) {
+                throw new RuntimeException(e);
+            }
             this.dataType = dataType;
+            if (dataType.equals(PDate.INSTANCE) || dataType.equals(PTime.INSTANCE) || dataType.equals(PTimestamp.INSTANCE)) {
+                String dateFormat = props.getProperty(QueryServices.DATE_FORMAT_ATTRIB,
+                        QueryServicesOptions.DEFAULT_DATE_FORMAT);
+                String timeZoneId = props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
+                        QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
+                this.dateTimeParser = DateUtil.getDateParser(dateFormat, timeZoneId);
+            } else {
+                this.dateTimeParser = null;
+            }
         }
 
         @Nullable
         @Override
         public Object apply(@Nullable String input) {
+            if (dateTimeParser != null && input != null) {
+                return dateTimeParser.parseDateTime(input);
+            }
             return dataType.toObject(input);
         }
     }

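The practical effect is that CSV loads now honor the connection-level date
format settings instead of always falling back to the default parser. A hedged
sketch of driving this from client code (the JDBC URL is a placeholder; the
property keys are the QueryServices constants referenced above):

    Properties props = new Properties();
    props.setProperty(QueryServices.DATE_FORMAT_ATTRIB, "dd/MM/yyyy");
    props.setProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, "GMT+1");
    Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
    // DATE/TIME/TIMESTAMP columns in subsequent CSV upserts through
    // CsvUpsertExecutor are parsed with the configured pattern and zone.
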
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java
index d50863b..4e931a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.util.csv;
 import java.sql.Array;
 import java.sql.Connection;
 import java.sql.SQLException;
+import java.util.Properties;
 
 import javax.annotation.Nullable;
 
@@ -29,6 +30,7 @@ import com.google.common.base.Function;
 import com.google.common.base.Splitter;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import org.apache.phoenix.util.DateUtil;
 
 /**
  * Converts strings with delimited values into Phoenix arrays.
@@ -38,7 +40,7 @@ class StringToArrayConverter {
     private final Splitter splitter;
     private final Connection conn;
     private final PDataType elementDataType;
-    private final ElementConvertFunction elementConvertFunction;
+    private final CsvUpsertExecutor.SimpleDatatypeConversionFunction elementConvertFunction;
 
     /**
      * Instantiate with the array value separator and data type.
@@ -52,7 +54,7 @@ class StringToArrayConverter {
         this.conn = conn;
         this.splitter = Splitter.on(separatorString);
         this.elementDataType = elementDataType;
-        this.elementConvertFunction = new ElementConvertFunction(elementDataType);
+        this.elementConvertFunction = new CsvUpsertExecutor.SimpleDatatypeConversionFunction(elementDataType, this.conn);
     }
 
     /**
@@ -72,22 +74,4 @@ class StringToArrayConverter {
                                 splitter.split(input),
                                 elementConvertFunction)).toArray());
     }
-
-    /**
-     * Converts incoming string values into their typed equivalent.
-     */
-    private static class ElementConvertFunction implements Function<String, Object> {
-
-        private final PDataType pdataType;
-
-        private ElementConvertFunction(PDataType pdataType) {
-            this.pdataType = pdataType;
-        }
-
-        @Nullable
-        @Override
-        public Object apply(@Nullable String input) {
-            return pdataType.toObject(input);
-        }
-    }
 }

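Because array elements now run through SimpleDatatypeConversionFunction, they
get the same date handling as scalar columns. A sketch, assuming the
constructor argument order implied by the field assignments above:

    StringToArrayConverter converter = new StringToArrayConverter(conn, ":", PDate.INSTANCE);
    // Elements of a DATE array cell are now parsed with the connection's
    // configured date format rather than plain PDataType.toObject().
    Array array = converter.toArray("2015-01-01:2015-02-01");
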
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
index d15051c..6dbd303 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
@@ -277,8 +277,7 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
         Scan scan = plan.getContext().getScan();
         Filter filter = scan.getFilter();
 
-        Format format = DateUtil.getDateParser(DateUtil.DEFAULT_DATE_FORMAT);
-        Object date = format.parseObject(dateStr);
+        Object date = DateUtil.parseDateTime(dateStr);
 
         assertEquals(
             singleKVFilter(constantComparison(

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d770333/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
index 2903b5d..1cca156 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java
@@ -69,64 +69,64 @@ public class DateUtilTest {
 
     @Test
     public void testGetDateParser_DefaultTimeZone() throws ParseException {
-        Date date = (Date) DateUtil.getDateParser("yyyy-MM-dd").parseObject("1970-01-01");
+        Date date = (Date) DateUtil.getDateParser("yyyy-MM-dd").parseDateTime("1970-01-01");
         assertEquals(0, date.getTime());
     }
 
     @Test
     public void testGetDateParser_CustomTimeZone() throws ParseException {
         Date date = (Date) DateUtil.getDateParser(
-                "yyyy-MM-dd", TimeZone.getTimeZone("GMT+1")).parseObject("1970-01-01");
+                "yyyy-MM-dd", TimeZone.getTimeZone("GMT+1")).parseDateTime("1970-01-01");
         assertEquals(-ONE_HOUR_IN_MILLIS, date.getTime());
     }
 
     @Test
     public void testGetDateParser_LocalTimeZone() throws ParseException {
         Date date = (Date) DateUtil.getDateParser(
-                "yyyy-MM-dd", TimeZone.getDefault()).parseObject("1970-01-01");
+                "yyyy-MM-dd", TimeZone.getDefault()).parseDateTime("1970-01-01");
         assertEquals(Date.valueOf("1970-01-01"), date);
     }
 
     @Test
     public void testGetTimestampParser_DefaultTimeZone() throws ParseException {
-        Timestamp ts = (Timestamp) DateUtil.getTimestampParser("yyyy-MM-dd HH:mm:ss")
-                .parseObject("1970-01-01 00:00:00");
+        Timestamp ts = new Timestamp(DateUtil.getTimestampParser("yyyy-MM-dd HH:mm:ss")
+                .parseDateTime("1970-01-01 00:00:00").getTime());
         assertEquals(0, ts.getTime());
     }
 
     @Test
     public void testGetTimestampParser_CustomTimeZone() throws ParseException {
-        Timestamp ts = (Timestamp) DateUtil.getTimestampParser("yyyy-MM-dd HH:mm:ss", TimeZone.getTimeZone("GMT+1"))
-                .parseObject("1970-01-01 00:00:00");
+        Timestamp ts = new Timestamp(DateUtil.getTimestampParser("yyyy-MM-dd HH:mm:ss", TimeZone.getTimeZone("GMT+1"))
+                .parseDateTime("1970-01-01 00:00:00").getTime());
         assertEquals(-ONE_HOUR_IN_MILLIS, ts.getTime());
     }
 
     @Test
     public void testGetTimestampParser_LocalTimeZone() throws ParseException {
-        Timestamp ts = (Timestamp) DateUtil.getTimestampParser(
+        Timestamp ts = new Timestamp(DateUtil.getTimestampParser(
                 "yyyy-MM-dd HH:mm:ss",
-                TimeZone.getDefault()).parseObject("1970-01-01 00:00:00");
+                TimeZone.getDefault()).parseDateTime("1970-01-01 00:00:00").getTime());
         assertEquals(Timestamp.valueOf("1970-01-01 00:00:00"), ts);
     }
 
     @Test
     public void testGetTimeParser_DefaultTimeZone() throws ParseException {
-        Time time = (Time) DateUtil.getTimeParser("HH:mm:ss").parseObject("00:00:00");
+        Time time = new Time(DateUtil.getTimeParser("HH:mm:ss").parseDateTime("00:00:00").getTime());
         assertEquals(0, time.getTime());
     }
 
     @Test
     public void testGetTimeParser_CustomTimeZone() throws ParseException {
-        Time time = (Time) DateUtil.getTimeParser(
+        Time time = new Time(DateUtil.getTimeParser(
                 "HH:mm:ss",
-                TimeZone.getTimeZone("GMT+1")).parseObject("00:00:00");
+                TimeZone.getTimeZone("GMT+1")).parseDateTime("00:00:00").getTime());
         assertEquals(-ONE_HOUR_IN_MILLIS, time.getTime());
     }
 
     @Test
     public void testGetTimeParser_LocalTimeZone() throws ParseException {
-        Time time = (Time) DateUtil.getTimeParser(
-                "HH:mm:ss", TimeZone.getDefault()).parseObject("00:00:00");
+        Time time = new Time(DateUtil.getTimeParser(
+                "HH:mm:ss", TimeZone.getDefault()).parseDateTime("00:00:00").getTime());
         assertEquals(Time.valueOf("00:00:00"), time);
     }
 


[08/50] [abbrv] phoenix git commit: PHOENIX-514 Support functional indexes (Thomas D'Silva)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
index 91e4f36..964ac39 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
@@ -38,7 +38,7 @@ public class SaltingUtil {
     public static final String SALTING_COLUMN_NAME = "_SALT";
     public static final String SALTED_ROW_KEY_NAME = "_SALTED_KEY";
     public static final PColumnImpl SALTING_COLUMN = new PColumnImpl(
-            PNameFactory.newName(SALTING_COLUMN_NAME), null, PBinary.INSTANCE, 1, 0, false, 0, SortOrder.getDefault(), 0, null, false);
+            PNameFactory.newName(SALTING_COLUMN_NAME), null, PBinary.INSTANCE, 1, 0, false, 0, SortOrder.getDefault(), 0, null, false, null);
     public static final RowKeySchema VAR_BINARY_SALTED_SCHEMA = new RowKeySchemaBuilder(2)
         .addField(SALTING_COLUMN, false, SortOrder.getDefault())
         .addField(SchemaUtil.VAR_BINARY_DATUM, false, SortOrder.getDefault()).build();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ValueGetterTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ValueGetterTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ValueGetterTuple.java
new file mode 100644
index 0000000..6fc480e
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ValueGetterTuple.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.tuple;
+
+import static org.apache.phoenix.hbase.index.util.ImmutableBytesPtr.copyBytesIfNecessary;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.hbase.index.ValueGetter;
+import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+
+/**
+ * 
+ * Class used to construct a {@link Tuple} in order to evaluate an {@link Expression}
+ */
+public class ValueGetterTuple extends BaseTuple {
+    private ValueGetter valueGetter;
+    
+    public ValueGetterTuple(ValueGetter valueGetter) {
+        this.valueGetter = valueGetter;
+    }
+    
+    public ValueGetterTuple() {
+    }
+    
+    @Override
+    public void getKey(ImmutableBytesWritable ptr) {
+        ptr.set(valueGetter.getRowKey());
+    }
+
+    @Override
+    public boolean isImmutable() {
+        return true;
+    }
+
+    @Override
+    public KeyValue getValue(byte[] family, byte[] qualifier) {
+        ImmutableBytesPtr value = null;
+        try {
+            value = valueGetter.getLatestValue(new ColumnReference(family, qualifier));
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+        return new KeyValue(valueGetter.getRowKey(), family, qualifier, HConstants.LATEST_TIMESTAMP,
+                Type.Put, value != null ? copyBytesIfNecessary(value) : null);
+    }
+
+    @Override
+    public String toString() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int size() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public KeyValue getValue(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean getValue(byte[] family, byte[] qualifier,
+            ImmutableBytesWritable ptr) {
+        KeyValue kv = getValue(family, qualifier);
+        if (kv == null)
+            return false;
+        ptr.set(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+        return true;
+    }
+
+}

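A sketch of how this adapter is meant to be used: wrap a ValueGetter over the
current row state so an index expression can be evaluated without a real HBase
Result (the valueMap, rowKey, and indexedExpression variables here are assumed):

    ValueGetter getter = new ValueGetter() {
        @Override
        public ImmutableBytesPtr getLatestValue(ColumnReference ref) {
            return new ImmutableBytesPtr(valueMap.get(ref));
        }
        @Override
        public byte[] getRowKey() {
            return rowKey;
        }
    };
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean evaluated = indexedExpression.evaluate(new ValueGetterTuple(getter), ptr);
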
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 39e13bf..8dd4f4d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -209,10 +209,10 @@ public class IndexUtil {
     }
 
     public static List<Mutation> generateIndexData(final PTable table, PTable index,
-            List<Mutation> dataMutations, ImmutableBytesWritable ptr, final KeyValueBuilder kvBuilder)
+            List<Mutation> dataMutations, ImmutableBytesWritable ptr, final KeyValueBuilder kvBuilder, PhoenixConnection connection)
             throws SQLException {
         try {
-            IndexMaintainer maintainer = index.getIndexMaintainer(table);
+            IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
             List<Mutation> indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size());
            for (final Mutation dataMutation : dataMutations) {
                 long ts = MetaDataUtil.getClientTimeStamp(dataMutation);
@@ -227,6 +227,11 @@ public class IndexUtil {
                     // TODO: is this more efficient than looking in our mutation map
                     // using the key plus finding the PColumn?
                     ValueGetter valueGetter = new ValueGetter() {
+
+                        @Override
+                        public byte[] getRowKey() {
+                            return dataMutation.getRow();
+                        }
         
                         @Override
                         public ImmutableBytesPtr getLatestValue(ColumnReference ref) {
@@ -267,6 +272,10 @@ public class IndexUtil {
         return column.getName().getString().startsWith(INDEX_COLUMN_NAME_SEP);
     }
     
+    public static boolean isIndexColumn(PColumn column) {
+        return column.getName().getString().contains(INDEX_COLUMN_NAME_SEP);
+    }
+    
     public static boolean getViewConstantValue(PColumn column, ImmutableBytesWritable ptr) {
         byte[] value = column.getViewConstant();
         if (value != null) {
@@ -441,7 +450,7 @@ public class IndexUtil {
         PhoenixStatement statement = new PhoenixStatement(conn);
         TableRef indexTableRef = new TableRef(index) {
             @Override
-            public String getColumnDisplayName(ColumnRef ref, boolean cfCaseSensitive, boolean cqCaseSensitive) {
+            public String getColumnDisplayName(ColumnRef ref, boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
                 return '"' + ref.getColumn().getName().getString() + '"';
             }
         };

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
index b91d8ca..06d21c2 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
@@ -41,7 +41,7 @@ public class ColumnExpressionTest {
         int maxLen = 30;
         int scale = 5;
         PColumn column = new PColumnImpl(PNameFactory.newName("c1"), PNameFactory.newName("f1"), PDecimal.INSTANCE, maxLen, scale,
-                true, 20, SortOrder.getDefault(), 0, null, false);
+                true, 20, SortOrder.getDefault(), 0, null, false, null);
         ColumnExpression colExp = new KeyValueColumnExpression(column);
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         DataOutputStream dOut = new DataOutputStream(baos);
@@ -61,7 +61,7 @@ public class ColumnExpressionTest {
     public void testSerializationWithNullScale() throws Exception {
         int maxLen = 30;
         PColumn column = new PColumnImpl(PNameFactory.newName("c1"), PNameFactory.newName("f1"), PBinary.INSTANCE, maxLen, null,
-                true, 20, SortOrder.getDefault(), 0, null, false);
+                true, 20, SortOrder.getDefault(), 0, null, false, null);
         ColumnExpression colExp = new KeyValueColumnExpression(column);
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         DataOutputStream dOut = new DataOutputStream(baos);
@@ -81,7 +81,7 @@ public class ColumnExpressionTest {
     public void testSerializationWithNullMaxLength() throws Exception {
         int scale = 5;
         PColumn column = new PColumnImpl(PNameFactory.newName("c1"), PNameFactory.newName("f1"), PVarchar.INSTANCE, null, scale,
-                true, 20, SortOrder.getDefault(), 0, null, false);
+                true, 20, SortOrder.getDefault(), 0, null, false, null);
         ColumnExpression colExp = new KeyValueColumnExpression(column);
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         DataOutputStream dOut = new DataOutputStream(baos);
@@ -100,7 +100,7 @@ public class ColumnExpressionTest {
     @Test
     public void testSerializationWithNullScaleAndMaxLength() throws Exception {
         PColumn column = new PColumnImpl(PNameFactory.newName("c1"), PNameFactory.newName("f1"), PDecimal.INSTANCE, null, null, true,
-                20, SortOrder.getDefault(), 0, null, false);
+                20, SortOrder.getDefault(), 0, null, false, null);
         ColumnExpression colExp = new KeyValueColumnExpression(column);
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         DataOutputStream dOut = new DataOutputStream(baos);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java
index 183f699..592ac7c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java
@@ -69,13 +69,18 @@ public class IndexMaintainerTest  extends BaseConnectionlessQueryTest {
         testIndexRowKeyBuilding(DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME, dataColumns, pk, indexColumns, values, "", dataProps, indexProps);
     }
 
-    private static ValueGetter newValueGetter(final Map<ColumnReference, byte[]> valueMap) {
+    private static ValueGetter newValueGetter(final byte[] row, final Map<ColumnReference, byte[]> valueMap) {
         return new ValueGetter() {
 
             @Override
             public ImmutableBytesPtr getLatestValue(ColumnReference ref) {
                 return new ImmutableBytesPtr(valueMap.get(ref));
             }
+
+            @Override
+            public byte[] getRowKey() {
+                return row;
+            }
             
         };
     }
@@ -98,7 +103,7 @@ public class IndexMaintainerTest  extends BaseConnectionlessQueryTest {
             PTable table = pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), fullTableName));
             PTable index = pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(),fullIndexName));
             ImmutableBytesWritable ptr = new ImmutableBytesWritable();
-            table.getIndexMaintainers(ptr);
+            table.getIndexMaintainers(ptr, pconn);
             List<IndexMaintainer> c1 = IndexMaintainer.deserialize(ptr, builder);
             assertEquals(1,c1.size());
             IndexMaintainer im1 = c1.get(0);
@@ -116,13 +121,14 @@ public class IndexMaintainerTest  extends BaseConnectionlessQueryTest {
             	Iterator<Pair<byte[],List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
             List<KeyValue> dataKeyValues = iterator.next().getSecond();
             Map<ColumnReference,byte[]> valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size());
-            ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(dataKeyValues.get(0).getRow());
+            byte[] row = dataKeyValues.get(0).getRow();
+            ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(row);
             Put dataMutation = new Put(rowKeyPtr.copyBytes());
             for (KeyValue kv : dataKeyValues) {
                 valueMap.put(new ColumnReference(kv.getFamily(),kv.getQualifier()), kv.getValue());
                 dataMutation.add(kv);
             }
-            ValueGetter valueGetter = newValueGetter(valueMap);
+            ValueGetter valueGetter = newValueGetter(row, valueMap);
             
             List<Mutation> indexMutations =
                     IndexTestUtil.generateIndexData(index, table, dataMutation, ptr, builder);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java
index f02738d..5624b51 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java
@@ -124,6 +124,11 @@ public class AggregateResultScannerTest extends BaseConnectionlessQueryTest {
             public boolean isViewReferenced() {
                 return false;
             }
+            
+            @Override
+            public String getExpressionStr() {
+                return null;
+            }
         })), null);
         aggregationManager.setAggregators(new ClientAggregators(Collections.<SingleAggregateFunction>singletonList(func), 1));
         ResultIterators iterators = new ResultIterators() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 6a2ce26..f81c3a9 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -376,39 +376,45 @@ public abstract class BaseTest {
                 "    kv bigint)\n");
         builder.put(INDEX_DATA_TABLE, "create table " + INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + INDEX_DATA_TABLE + "(" +
                 "   varchar_pk VARCHAR NOT NULL, " +
-                "   char_pk CHAR(5) NOT NULL, " +
+                "   char_pk CHAR(6) NOT NULL, " +
                 "   int_pk INTEGER NOT NULL, "+ 
                 "   long_pk BIGINT NOT NULL, " +
                 "   decimal_pk DECIMAL(31, 10) NOT NULL, " +
+                "   date_pk DATE NOT NULL, " +
                 "   a.varchar_col1 VARCHAR, " +
-                "   a.char_col1 CHAR(5), " +
+                "   a.char_col1 CHAR(10), " +
                 "   a.int_col1 INTEGER, " +
                 "   a.long_col1 BIGINT, " +
                 "   a.decimal_col1 DECIMAL(31, 10), " +
+                "   a.date1 DATE, " +
                 "   b.varchar_col2 VARCHAR, " +
-                "   b.char_col2 CHAR(5), " +
+                "   b.char_col2 CHAR(10), " +
                 "   b.int_col2 INTEGER, " +
                 "   b.long_col2 BIGINT, " +
-                "   b.decimal_col2 DECIMAL(31, 10) " +
-                "   CONSTRAINT pk PRIMARY KEY (varchar_pk, char_pk, int_pk, long_pk DESC, decimal_pk)) " +
+                "   b.decimal_col2 DECIMAL(31, 10), " +
+                "   b.date2 DATE " +
+                "   CONSTRAINT pk PRIMARY KEY (varchar_pk, char_pk, int_pk, long_pk DESC, decimal_pk, date_pk)) " +
                 "IMMUTABLE_ROWS=true");
         builder.put(MUTABLE_INDEX_DATA_TABLE, "create table " + INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + MUTABLE_INDEX_DATA_TABLE + "(" +
                 "   varchar_pk VARCHAR NOT NULL, " +
-                "   char_pk CHAR(5) NOT NULL, " +
+                "   char_pk CHAR(6) NOT NULL, " +
                 "   int_pk INTEGER NOT NULL, "+ 
                 "   long_pk BIGINT NOT NULL, " +
                 "   decimal_pk DECIMAL(31, 10) NOT NULL, " +
+                "   date_pk DATE NOT NULL, " +
                 "   a.varchar_col1 VARCHAR, " +
-                "   a.char_col1 CHAR(5), " +
+                "   a.char_col1 CHAR(10), " +
                 "   a.int_col1 INTEGER, " +
                 "   a.long_col1 BIGINT, " +
                 "   a.decimal_col1 DECIMAL(31, 10), " +
+                "   a.date1 DATE, " +
                 "   b.varchar_col2 VARCHAR, " +
-                "   b.char_col2 CHAR(5), " +
+                "   b.char_col2 CHAR(10), " +
                 "   b.int_col2 INTEGER, " +
                 "   b.long_col2 BIGINT, " +
-                "   b.decimal_col2 DECIMAL(31, 10) " +
-                "   CONSTRAINT pk PRIMARY KEY (varchar_pk, char_pk, int_pk, long_pk DESC, decimal_pk)) "
+                "   b.decimal_col2 DECIMAL(31, 10), " +
+                "   b.date2 DATE " +
+                "   CONSTRAINT pk PRIMARY KEY (varchar_pk, char_pk, int_pk, long_pk DESC, decimal_pk, date_pk)) "
                 );
         builder.put("SumDoubleTest","create table SumDoubleTest" +
                 "   (id varchar not null primary key, d DOUBLE, f FLOAT, ud UNSIGNED_DOUBLE, uf UNSIGNED_FLOAT, i integer, de decimal)");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-protocol/src/main/PTable.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/PTable.proto b/phoenix-protocol/src/main/PTable.proto
index 89ceea3..348631f 100644
--- a/phoenix-protocol/src/main/PTable.proto
+++ b/phoenix-protocol/src/main/PTable.proto
@@ -46,6 +46,7 @@ message PColumn {
   optional int32 arraySize = 9;
   optional bytes viewConstant = 10;
   optional bool viewReferenced = 11;
+  optional string expression = 12;
 }
 
 message PTableStats {

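Because the new field is optional, the wire format stays compatible in both
directions: a PColumn message written by an older client simply lacks it. A
hedged sketch of reading the field from the generated protobuf class (the
variable name is hypothetical):

    // pbColumn is the protobuf-generated PColumn, not org.apache.phoenix.schema.PColumn.
    String expressionStr = pbColumn.hasExpression() ? pbColumn.getExpression() : null;
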

[29/50] [abbrv] phoenix git commit: PHOENIX-1654 Incorrect group-by keys from ClientAggregatePlan

Posted by ma...@apache.org.
PHOENIX-1654 Incorrect group-by keys from ClientAggregatePlan


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fc299d5e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fc299d5e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fc299d5e

Branch: refs/heads/calcite
Commit: fc299d5e554cc5842fce96ece64058069fbea78c
Parents: e0a81a0
Author: maryannxue <we...@intel.com>
Authored: Thu Feb 12 15:39:23 2015 -0500
Committer: maryannxue <we...@intel.com>
Committed: Thu Feb 12 15:39:23 2015 -0500

----------------------------------------------------------------------
 .../java/org/apache/phoenix/end2end/HashJoinIT.java   | 14 ++++++++++++++
 .../org/apache/phoenix/end2end/SortMergeJoinIT.java   | 14 ++++++++++++++
 .../apache/phoenix/execute/ClientAggregatePlan.java   |  2 +-
 3 files changed, 29 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc299d5e/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index 5d2f522..03686f0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -2785,6 +2785,20 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                             + "GROUP BY t1.TID, t1.A, t2.A");
             upsertStmt.execute();
             conn.commit();            
+
+            rs = statement.executeQuery("SELECT * FROM " + joinTable);
+            assertTrue(rs.next());
+            assertEquals(rs.getString(1), "1");
+            assertEquals(rs.getInt(2), 1);
+            assertEquals(rs.getInt(3), 2);
+            assertEquals(rs.getInt(4), 2);
+            assertTrue(rs.next());
+            assertEquals(rs.getString(1), "1");
+            assertEquals(rs.getInt(2), 2);
+            assertEquals(rs.getInt(3), 1);
+            assertEquals(rs.getInt(4), 2);
+
+            assertFalse(rs.next());
         } finally {
             conn.close();
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc299d5e/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
index 4503b5b..7912803 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
@@ -1805,6 +1805,20 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
                             + "GROUP BY t1.TID, t1.A, t2.A");
             upsertStmt.execute();
             conn.commit();            
+
+            rs = statement.executeQuery("SELECT * FROM " + joinTable);
+            assertTrue(rs.next());
+            assertEquals(rs.getString(1), "1");
+            assertEquals(rs.getInt(2), 1);
+            assertEquals(rs.getInt(3), 2);
+            assertEquals(rs.getInt(4), 2);
+            assertTrue(rs.next());
+            assertEquals(rs.getString(1), "1");
+            assertEquals(rs.getInt(2), 2);
+            assertEquals(rs.getInt(3), 1);
+            assertEquals(rs.getInt(4), 2);
+
+            assertFalse(rs.next());
         } finally {
             conn.close();
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc299d5e/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
index 59aab2d..30adbe9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
@@ -101,7 +101,7 @@ public class ClientAggregatePlan extends ClientProcessingPlan {
                 }
                 iterator = new OrderedResultIterator(iterator, keyExpressionOrderBy, thresholdBytes, limit, projector.getEstimatedRowByteSize());
             }
-            aggResultIterator = new ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators, groupBy.getExpressions());
+            aggResultIterator = new ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators, groupBy.getKeyExpressions());
             aggResultIterator = new GroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator), clientAggregators);
         }
 

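The one-line fix matters because the client-side grouped iterator consumes
input that was ordered by the minimized group-by key expressions; grouping on
the raw GROUP BY expressions instead could split or merge groups. A
hypothetical reconstruction of the failing query shape (table T and its
columns are invented for illustration):

    ResultSet rs = conn.createStatement().executeQuery(
            "SELECT t1.TID, t1.A, t2.A, COUNT(*) " +
            "FROM (SELECT TID, A FROM T) t1 " +
            "JOIN (SELECT TID, A FROM T) t2 ON t1.TID = t2.TID AND t1.A <> t2.A " +
            "GROUP BY t1.TID, t1.A, t2.A");
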

[15/50] [abbrv] phoenix git commit: PHOENIX-1643 Ensure index usage is backward compatible for 4.2 client against 4.3 server (Thomas D'Silva)

Posted by ma...@apache.org.
PHOENIX-1643 Ensure index usage is backward compatible for 4.2 client against 4.3 server (Thomas D'Silva)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9db37bd9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9db37bd9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9db37bd9

Branch: refs/heads/calcite
Commit: 9db37bd90005695c703f9d092651891a5e39a80a
Parents: 54c4ed8
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Feb 6 16:08:43 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri Feb 6 16:08:43 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/index/IndexMaintainer.java   |  9 ++------
 .../parse/IndexExpressionParseNodeRewriter.java |  6 ++---
 .../org/apache/phoenix/schema/PColumnImpl.java  |  2 +-
 .../java/org/apache/phoenix/util/IndexUtil.java | 16 +++++++------
 .../org/apache/phoenix/util/SchemaUtil.java     | 24 ++++++++++++--------
 5 files changed, 29 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9db37bd9/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 31f6c76..7199dad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -282,9 +282,6 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         int indexedExpressionCount = 0;
         for (int i = indexPosOffset; i<index.getPKColumns().size();i++) {
         	PColumn indexColumn = index.getPKColumns().get(i);
-        	if (!IndexUtil.isIndexColumn(indexColumn)) {
-                continue;
-            }
         	String indexColumnName = indexColumn.getName().getString();
             String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColumnName);
             String dataColumnName = IndexUtil.getDataColumnName(indexColumnName);
@@ -341,14 +338,12 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
         for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) {
             PColumn indexColumn = index.getPKColumns().get(i);
-            if (!IndexUtil.isIndexColumn(indexColumn)) {
-                continue;
-            }
             int indexPos = i - indexPosOffset;
             Expression expression = null;
             try {
                 expressionIndexCompiler.reset();
-                ParseNode parseNode  = SQLParser.parseCondition(indexColumn.getExpressionStr());
+                String expressionStr = IndexUtil.getIndexColumnExpressionStr(indexColumn);
+                ParseNode parseNode  = SQLParser.parseCondition(expressionStr);
                 expression = parseNode.accept(expressionIndexCompiler);
             } catch (SQLException e) {
                 throw new RuntimeException(e); // Impossible

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9db37bd9/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
index efa3835..43cb9f3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
@@ -71,10 +71,8 @@ public class IndexExpressionParseNodeRewriter extends ParseNodeRewriter {
         List<PColumn> pkColumns = index.getPKColumns();
 		for (int i=indexPosOffset; i<pkColumns.size(); ++i) {
         	PColumn column = pkColumns.get(i);
-            if (column.getExpressionStr()==null) {
-                continue;
-            }
-            ParseNode expressionParseNode = SQLParser.parseCondition(column.getExpressionStr());
+        	String expressionStr = IndexUtil.getIndexColumnExpressionStr(column);
+            ParseNode expressionParseNode  = SQLParser.parseCondition(expressionStr);
             columnParseNodeVisitor.reset();
             expressionParseNode.accept(columnParseNodeVisitor);
             String colName = column.getName().getString();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9db37bd9/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
index 11cc53d..ac044df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
@@ -38,7 +38,7 @@ public class PColumnImpl implements PColumn {
     private byte[] viewConstant;
     private boolean isViewReferenced;
     private String expressionStr;
-
+    
     public PColumnImpl() {
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9db37bd9/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 8dd4f4d..c058eb8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -137,12 +137,9 @@ public class IndexUtil {
         return name.substring(0,name.indexOf(INDEX_COLUMN_NAME_SEP));
     }
 
-    public static String getDataColumnFullName(String name) {
+    public static String getCaseSensitiveDataColumnFullName(String name) {
         int index = name.indexOf(INDEX_COLUMN_NAME_SEP) ;
-        if (index == 0) {
-            return name.substring(index+1);
-        }
-        return SchemaUtil.getColumnDisplayName(name.substring(0, index), name.substring(index+1));
+        return SchemaUtil.getCaseSensitiveColumnDisplayName(name.substring(0, index), name.substring(index+1));
     }
 
     public static String getIndexColumnName(String dataColumnFamilyName, String dataColumnName) {
@@ -272,8 +269,8 @@ public class IndexUtil {
         return column.getName().getString().startsWith(INDEX_COLUMN_NAME_SEP);
     }
     
-    public static boolean isIndexColumn(PColumn column) {
-        return column.getName().getString().contains(INDEX_COLUMN_NAME_SEP);
+    public static boolean isIndexColumn(String name) {
+        return name.contains(INDEX_COLUMN_NAME_SEP);
     }
     
     public static boolean getViewConstantValue(PColumn column, ImmutableBytesWritable ptr) {
@@ -627,4 +624,9 @@ public class IndexUtil {
             result.set(i, newCell);
         }
     }
+    
+    public static String getIndexColumnExpressionStr(PColumn col) {
+        return col.getExpressionStr() == null ? IndexUtil.getCaseSensitiveDataColumnFullName(col.getName().getString())
+                : col.getExpressionStr();
+    }
 }

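The fallback keeps indexes created by 4.2 clients usable: their PK columns
carry no stored expression string, so the encoded column name itself is
decoded into a case-sensitive column reference. Roughly (column names are
hypothetical):

    // Legacy index column named "A:CHAR_COL1" (no expression string stored):
    IndexUtil.getIndexColumnExpressionStr(legacyColumn);     // -> "A"."CHAR_COL1"
    // Functional index column written by a 4.3+ client:
    IndexUtil.getIndexColumnExpressionStr(functionalColumn); // -> its stored expression
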
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9db37bd9/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index afd61ad..c9574e3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -206,14 +206,16 @@ public class SchemaUtil {
     }
 
     public static String getTableName(String schemaName, String tableName) {
-        return getName(schemaName,tableName);
+        return getName(schemaName,tableName, false);
     }
 
-    private static String getName(String optionalQualifier, String name) {
+    private static String getName(String optionalQualifier, String name, boolean caseSensitive) {
+        String cq = caseSensitive ? "\"" + name + "\"" : name;
         if (optionalQualifier == null || optionalQualifier.isEmpty()) {
-            return name;
+            return cq;
         }
-        return optionalQualifier + QueryConstants.NAME_SEPARATOR + name;
+        String cf = caseSensitive ? "\"" + optionalQualifier + "\"" : optionalQualifier;
+        return cf + QueryConstants.NAME_SEPARATOR + cq;
     }
 
     public static String getTableName(byte[] schemaName, byte[] tableName) {
@@ -225,21 +227,25 @@ public class SchemaUtil {
     }
 
     public static String getColumnDisplayName(String cf, String cq) {
-        return getName(cf == null || cf.isEmpty() ? null : cf, cq);
+        return getName(cf == null || cf.isEmpty() ? null : cf, cq, false);
+    }
+    
+    public static String getCaseSensitiveColumnDisplayName(String cf, String cq) {
+        return getName(cf == null || cf.isEmpty() ? null : cf, cq, true);
     }
 
     public static String getMetaDataEntityName(String schemaName, String tableName, String familyName, String columnName) {
         if ((schemaName == null || schemaName.isEmpty()) && (tableName == null || tableName.isEmpty())) {
-            return getName(familyName, columnName);
+            return getName(familyName, columnName, false);
         }
         if ((familyName == null || familyName.isEmpty()) && (columnName == null || columnName.isEmpty())) {
-            return getName(schemaName, tableName);
+            return getName(schemaName, tableName, false);
         }
-        return getName(getName(schemaName, tableName), getName(familyName, columnName));
+        return getName(getName(schemaName, tableName, false), getName(familyName, columnName, false), false);
     }
 
     public static String getColumnName(String familyName, String columnName) {
-        return getName(familyName, columnName);
+        return getName(familyName, columnName, false);
     }
 
     public static byte[] getTableNameAsBytes(String schemaName, String tableName) {

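The caseSensitive flag simply wraps each name part in double quotes, so the
rebuilt reference parses back to the exact-case column. A quick sketch of the
difference:

    SchemaUtil.getColumnDisplayName("a", "col");               // -> a.col
    SchemaUtil.getCaseSensitiveColumnDisplayName("a", "col");  // -> "a"."col"
    SchemaUtil.getCaseSensitiveColumnDisplayName(null, "col"); // -> "col"
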

[48/50] [abbrv] phoenix git commit: PHOENIX-1696 Selecting column more than once fails (Maryann Xue, Samarth Jain)

Posted by ma...@apache.org.
PHOENIX-1696 Selecting column more than once fails (Maryann Xue, Samarth Jain)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9a546b9c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9a546b9c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9a546b9c

Branch: refs/heads/calcite
Commit: 9a546b9c89a3920345ea35ad503dfb360d4c34a5
Parents: 49f06b3
Author: James Taylor <jt...@salesforce.com>
Authored: Wed Mar 4 08:27:51 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Mar 4 08:27:51 2015 -0800

----------------------------------------------------------------------
 .../java/org/apache/phoenix/end2end/QueryMoreIT.java  | 14 ++++++++++++++
 .../java/org/apache/phoenix/compile/RowProjector.java |  3 ---
 2 files changed, 14 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9a546b9c/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index af5e6fa..e725376 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -33,6 +35,7 @@ import java.util.Properties;
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
 
 import com.google.common.collect.Lists;
@@ -314,4 +317,15 @@ public class QueryMoreIT extends BaseHBaseManagedTimeIT {
         sb.append(")");
         return sb.toString();
     }
+    
+    @Test // see - https://issues.apache.org/jira/browse/PHOENIX-1696
+    public void testSelectColumnMoreThanOnce() throws Exception {
+        Date date = new Date(System.currentTimeMillis());
+        initEntityHistoryTableValues("abcd", getDefaultSplits("abcd"), date, 100l);
+        String query = "SELECT NEW_VALUE, NEW_VALUE FROM " + TestUtil.ENTITY_HISTORY_TABLE_NAME + " LIMIT 1";
+        ResultSet rs = DriverManager.getConnection(getUrl()).createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        rs.getObject("NEW_VALUE");
+        assertFalse(rs.next());
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9a546b9c/phoenix-core/src/main/java/org/apache/phoenix/compile/RowProjector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/RowProjector.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/RowProjector.java
index 364ebd6..1b35e92 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/RowProjector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/RowProjector.java
@@ -139,9 +139,6 @@ public class RowProjector {
                 throw new ColumnNotFoundException(name);
             }
         }
-        if (index.size() > 1) {
-            throw new AmbiguousColumnException(name);
-        }
         
         return index.get(0);
     }
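
With the AmbiguousColumnException check removed from RowProjector above,
projecting the same column twice is now legal, and a name-based lookup resolves
to the first matching projected column (index.get(0)). A minimal JDBC sketch of
the now-working pattern from the test above; the connection URL and MY_TABLE
are placeholders, and a Phoenix JDBC driver is assumed on the classpath.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class DuplicateColumnSelect {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(
                         "SELECT NEW_VALUE, NEW_VALUE FROM MY_TABLE LIMIT 1")) {
                while (rs.next()) {
                    // Name-based lookup now returns the first matching projected
                    // column instead of throwing AmbiguousColumnException.
                    System.out.println(rs.getObject("NEW_VALUE"));
                }
            }
        }
    }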


[07/50] [abbrv] phoenix git commit: PHOENIX-1248 CsvBulkLoadTool is failing with IAE when local index specified for --index-table parameter (Gabriel Reid)

Posted by ma...@apache.org.
PHOENIX-1248 CsvBulkLoadTool is failing with IAE when local index specified for --index-table parameter (Gabriel Reid)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d6e7846f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d6e7846f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d6e7846f

Branch: refs/heads/calcite
Commit: d6e7846f491e09780a2d663cc5c23bc21244e26c
Parents: 3f48938
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Sun Feb 1 09:28:29 2015 -0800
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Sun Feb 1 09:28:29 2015 -0800

----------------------------------------------------------------------
 .../phoenix/mapreduce/CsvBulkLoadToolIT.java    | 79 ++++++++++++++----
 .../phoenix/mapreduce/CsvBulkLoadTool.java      | 85 ++++++++++++++------
 .../phoenix/mapreduce/CsvToKeyValueMapper.java  |  3 +-
 3 files changed, 126 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6e7846f/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
index 4373f47..0501142 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
@@ -17,12 +17,6 @@
  */
 package org.apache.phoenix.mapreduce;
 
-import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 import java.io.PrintWriter;
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -42,6 +36,12 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 @Category(NeedsOwnMiniClusterTest.class)
 public class CsvBulkLoadToolIT {
 
@@ -191,17 +191,62 @@ public class CsvBulkLoadToolIT {
         rs.close();
         stmt.close();
     }
-    
+
     @Test
-    public void testImportOneIndexTable() throws Exception {
+    public void testImportWithLocalIndex() throws Exception {
 
         Statement stmt = conn.createStatement();
-        stmt.execute("CREATE TABLE TABLE4 (ID INTEGER NOT NULL PRIMARY KEY, " +
-            "FIRST_NAME VARCHAR, LAST_NAME VARCHAR)");
-        String ddl = "CREATE INDEX TABLE4_IDX ON TABLE4 "
+        stmt.execute("CREATE TABLE TABLE6 (ID INTEGER NOT NULL PRIMARY KEY, " +
+                "FIRST_NAME VARCHAR, LAST_NAME VARCHAR)");
+        String ddl = "CREATE LOCAL INDEX TABLE6_IDX ON TABLE6 "
                 + " (FIRST_NAME ASC)";
         stmt.execute(ddl);
-        
+
+        FileSystem fs = FileSystem.get(hbaseTestUtil.getConfiguration());
+        FSDataOutputStream outputStream = fs.create(new Path("/tmp/input3.csv"));
+        PrintWriter printWriter = new PrintWriter(outputStream);
+        printWriter.println("1,FirstName 1,LastName 1");
+        printWriter.println("2,FirstName 2,LastName 2");
+        printWriter.close();
+
+        CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+        csvBulkLoadTool.setConf(hbaseTestUtil.getConfiguration());
+        int exitCode = csvBulkLoadTool.run(new String[] {
+                "--input", "/tmp/input3.csv",
+                "--table", "table6",
+                "--zookeeper", zkQuorum});
+        assertEquals(0, exitCode);
+
+        ResultSet rs = stmt.executeQuery("SELECT id, FIRST_NAME FROM TABLE6 where first_name='FirstName 2'");
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        assertEquals("FirstName 2", rs.getString(2));
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testImportOneIndexTable() throws Exception {
+        testImportOneIndexTable("TABLE4", false);
+    }
+
+    @Test
+    public void testImportOneLocalIndexTable() throws Exception {
+        testImportOneIndexTable("TABLE5", true);
+    }
+
+    public void testImportOneIndexTable(String tableName, boolean localIndex) throws Exception {
+
+        String indexTableName = String.format("%s_IDX", tableName);
+        Statement stmt = conn.createStatement();
+        stmt.execute("CREATE TABLE " + tableName + "(ID INTEGER NOT NULL PRIMARY KEY, "
+                + "FIRST_NAME VARCHAR, LAST_NAME VARCHAR)");
+        String ddl =
+                "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexTableName + " ON "
+                        + tableName + "(FIRST_NAME ASC)";
+        stmt.execute(ddl);
+
         FileSystem fs = FileSystem.get(hbaseTestUtil.getConfiguration());
         FSDataOutputStream outputStream = fs.create(new Path("/tmp/input4.csv"));
         PrintWriter printWriter = new PrintWriter(outputStream);
@@ -213,14 +258,14 @@ public class CsvBulkLoadToolIT {
         csvBulkLoadTool.setConf(hbaseTestUtil.getConfiguration());
         int exitCode = csvBulkLoadTool.run(new String[] {
                 "--input", "/tmp/input4.csv",
-                "--table", "table4",
-                "--index-table", "TABLE4_IDX",
-                "--zookeeper", zkQuorum});
+                "--table", tableName,
+                "--index-table", indexTableName,
+                "--zookeeper", zkQuorum });
         assertEquals(0, exitCode);
 
-        ResultSet rs = stmt.executeQuery("SELECT * FROM TABLE4");
+        ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName);
         assertFalse(rs.next());
-        rs = stmt.executeQuery("SELECT FIRST_NAME FROM TABLE4 where FIRST_NAME='FirstName 1'");
+        rs = stmt.executeQuery("SELECT FIRST_NAME FROM " + tableName + " where FIRST_NAME='FirstName 1'");
         assertTrue(rs.next());
         assertEquals("FirstName 1", rs.getString(1));
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6e7846f/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
index 54e3f2c..c92a3a3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
@@ -61,8 +61,10 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.job.JobManager;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.util.CSVCommonsLoader;
 import org.apache.phoenix.util.ColumnInfo;
+import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
@@ -212,38 +214,39 @@ public class CsvBulkLoadTool extends Configured implements Tool {
             outputPath = new Path("/tmp/" + UUID.randomUUID());
         }
         
-        List<String> tablesToBeLoaded = new ArrayList<String>();
-        tablesToBeLoaded.add(qualifiedTableName);
+        List<TargetTableRef> tablesToBeLoaded = new ArrayList<TargetTableRef>();
+        tablesToBeLoaded.add(new TargetTableRef(qualifiedTableName));
         tablesToBeLoaded.addAll(getIndexTables(conn, schemaName, qualifiedTableName));
         
         // When loading a single index table, check index table name is correct
         if(qualifedIndexTableName != null){
-        	boolean exists = false;
-        	for(String tmpTable : tablesToBeLoaded){
-        		if(tmpTable.compareToIgnoreCase(qualifedIndexTableName) == 0) {
-        			exists = true;
+            TargetTableRef targetIndexRef = null;
+        	for (TargetTableRef tmpTable : tablesToBeLoaded){
+        		if(tmpTable.getLogicalName().compareToIgnoreCase(qualifedIndexTableName) == 0) {
+                    targetIndexRef = tmpTable;
         			break;
         		}
         	}
-        	if(!exists){
+        	if(targetIndexRef == null){
                 throw new IllegalStateException("CSV Bulk Loader error: index table " +
                     qualifedIndexTableName + " doesn't exist");
         	}
         	tablesToBeLoaded.clear();
-        	tablesToBeLoaded.add(qualifedIndexTableName);
+        	tablesToBeLoaded.add(targetIndexRef);
         }
         
         List<Future<Boolean>> runningJobs = new ArrayList<Future<Boolean>>();
         ExecutorService executor =  JobManager.createThreadPoolExec(Integer.MAX_VALUE, 5, 20);
         try{
-	        for(String table : tablesToBeLoaded) {
-	        	Path tablePath = new Path(outputPath, table);
+	        for (TargetTableRef table : tablesToBeLoaded) {
+	        	Path tablePath = new Path(outputPath, table.getPhysicalName());
 	        	Configuration jobConf = new Configuration(conf);
 	        	jobConf.set(CsvToKeyValueMapper.TABLE_NAME_CONFKEY, qualifiedTableName);
-	        	if(qualifiedTableName.compareToIgnoreCase(table) != 0) {
-	        		jobConf.set(CsvToKeyValueMapper.INDEX_TABLE_NAME_CONFKEY, table);
+	        	if(qualifiedTableName.compareToIgnoreCase(table.getLogicalName()) != 0) {
+                    jobConf.set(CsvToKeyValueMapper.INDEX_TABLE_NAME_CONFKEY, table.getPhysicalName());
 	        	}
-	        	TableLoader tableLoader = new TableLoader(jobConf, table, inputPath, tablePath);
+	        	TableLoader tableLoader = new TableLoader(
+                        jobConf, table.getPhysicalName(), inputPath, tablePath);
 	        	runningJobs.add(executor.submit(tableLoader));
 	        }
         } finally {
@@ -392,20 +395,56 @@ public class CsvBulkLoadTool extends Configured implements Tool {
     }
     
     /**
-     * Get names of index tables of current data table
+     * Get the index tables of current data table
      * @throws java.sql.SQLException
      */
-    private List<String> getIndexTables(Connection conn, String schemaName, String tableName) 
+    private List<TargetTableRef> getIndexTables(Connection conn, String schemaName, String qualifiedTableName)
         throws SQLException {
-        PTable table = PhoenixRuntime.getTable(conn, tableName);
-        List<String> indexTables = new ArrayList<String>();
+        PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName);
+        List<TargetTableRef> indexTables = new ArrayList<TargetTableRef>();
         for(PTable indexTable : table.getIndexes()){
-        	indexTables.add(getQualifiedTableName(schemaName, 
-                indexTable.getTableName().getString()));
+            if (indexTable.getIndexType() == IndexType.LOCAL) {
+                indexTables.add(
+                        new TargetTableRef(getQualifiedTableName(schemaName,
+                                indexTable.getTableName().getString()),
+                                MetaDataUtil.getLocalIndexTableName(qualifiedTableName)));
+            } else {
+                indexTables.add(new TargetTableRef(getQualifiedTableName(schemaName,
+                        indexTable.getTableName().getString())));
+            }
         }
         return indexTables;
     }
-    
+
+    /**
+     * Represents the logical and physical name of a single table to which data is to be loaded.
+     *
+     * This class exists to allow for the difference between HBase physical table names and
+     * Phoenix logical table names.
+     */
+    private static class TargetTableRef {
+
+        private final String logicalName;
+        private final String physicalName;
+
+        private TargetTableRef(String name) {
+            this(name, name);
+        }
+
+        private TargetTableRef(String logicalName, String physicalName) {
+            this.logicalName = logicalName;
+            this.physicalName = physicalName;
+        }
+
+        public String getLogicalName() {
+            return logicalName;
+        }
+
+        public String getPhysicalName() {
+            return physicalName;
+        }
+    }
+
     /**
      * A runnable to load data into a single table
      *
@@ -445,9 +484,9 @@ public class CsvBulkLoadTool extends Configured implements Tool {
 
 	            // initialize credentials to possibily run in a secure env
 	            TableMapReduceUtil.initCredentials(job);
-	            
-	            HTable htable = new HTable(conf, tableName);
-	
+
+                HTable htable = new HTable(conf, tableName);
+
 	            // Auto configure partitioner and reducer according to the Main Data table
 	            HFileOutputFormat.configureIncrementalLoad(job, htable);
 	

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6e7846f/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
index ead5067..6ff7ba3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
@@ -36,6 +36,7 @@ import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+
 import org.apache.commons.csv.CSVFormat;
 import org.apache.commons.csv.CSVParser;
 import org.apache.commons.csv.CSVRecord;
@@ -165,7 +166,7 @@ public class CsvToKeyValueMapper extends Mapper<LongWritable,Text,ImmutableBytes
                     = PhoenixRuntime.getUncommittedDataIterator(conn, true);
             while (uncommittedDataIterator.hasNext()) {
                 Pair<byte[], List<KeyValue>> kvPair = uncommittedDataIterator.next();
-                if(Bytes.compareTo(tableName, kvPair.getFirst()) != 0) {
+                if (Bytes.compareTo(tableName, kvPair.getFirst()) != 0) {
                 	// skip edits for other tables
                 	continue;
                 }
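
The core of this patch is TargetTableRef's logical/physical split: a global
index is its own HBase table, while a local index's rows live in a shared
physical table derived from the data table's name (via
MetaDataUtil.getLocalIndexTableName), which is where the bulk-load HFiles must
be written. A hedged sketch of that mapping follows; the _LOCAL_IDX_ prefix
mirrors MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX as referenced elsewhere in this
series, but treat it and the table names as illustrative assumptions.

    final class IndexTargetDemo {
        static String localIndexPhysicalName(String dataTableName) {
            return "_LOCAL_IDX_" + dataTableName; // assumed prefix
        }

        public static void main(String[] args) {
            String dataTable = "TABLE6";      // hypothetical data table
            String localIndex = "TABLE6_IDX"; // hypothetical local index

            // Global index: logical and physical names coincide, so HFiles
            // go straight to the index table.
            System.out.println("TABLE4_IDX -> TABLE4_IDX");

            // Local index: rows live in the data table's shared local-index
            // table, so that is where the HFiles must be written.
            System.out.println(localIndex + " -> " + localIndexPhysicalName(dataTable));
        }
    }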


[16/50] [abbrv] phoenix git commit: PHOENIX-1641 Make the upgrade from 4.x to 4.3 work for SYSTEM.CATALOG and SYSTEM.SEQUENCE

Posted by ma...@apache.org.
PHOENIX-1641 Make the upgrade from 4.x to 4.3 work for SYSTEM.CATALOG and SYSTEM.SEQUENCE


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eaa7fbfd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eaa7fbfd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eaa7fbfd

Branch: refs/heads/calcite
Commit: eaa7fbfde9f45c635cc11d524977d54485c9ea8d
Parents: 9db37bd
Author: Samarth <sa...@salesforce.com>
Authored: Fri Feb 6 16:29:09 2015 -0800
Committer: Samarth <sa...@salesforce.com>
Committed: Fri Feb 6 16:29:09 2015 -0800

----------------------------------------------------------------------
 .../query/ConnectionQueryServicesImpl.java      |  69 +++++--
 .../org/apache/phoenix/util/UpgradeUtil.java    | 179 ++++++++++---------
 2 files changed, 146 insertions(+), 102 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eaa7fbfd/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 6d58f57..7763a0a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -136,6 +136,7 @@ import org.apache.phoenix.schema.stats.StatisticsUtil;
 import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PUnsignedTinyint;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.ConfigUtil;
@@ -1757,8 +1758,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
     }
 
-    // Keeping this to use for further upgrades
-    protected PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection,
+    /** 
+     * Keeping this to use for further upgrades. This method closes the oldMetaConnection.
+     */
+    private PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection,
         String tableName, long timestamp, String columns) throws SQLException {
 
         Properties props = new Properties(oldMetaConnection.getClientInfo());
@@ -1826,7 +1829,29 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             } catch (NewerTableAlreadyExistsException ignore) {
                                 // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed timestamp.
                                 // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
-                            } catch (TableAlreadyExistsException ignore) {
+                            } catch (TableAlreadyExistsException e) {
+                                // This will occur if we have an older SYSTEM.CATALOG and we need to update it to include
+                                // any new columns we've added.
+                                long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
+                                
+                                // We know that we always need to add the STORE_NULLS column for 4.3 release
+                                String columnsToAdd = PhoenixDatabaseMetaData.STORE_NULLS + " " + PBoolean.INSTANCE.getSqlTypeName();
+                                
+                                // If the server side schema is 4 versions behind then we need to add INDEX_TYPE
+                                // and INDEX_DISABLE_TIMESTAMP columns too.
+                                // TODO: Once https://issues.apache.org/jira/browse/PHOENIX-1614 is fixed,
+                                // we should just have a ALTER TABLE ADD IF NOT EXISTS statement with all
+                                // the column names that have been added to SYSTEM.CATALOG since 4.0.
+                                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP - 4) {
+                                    columnsToAdd += ", " + PhoenixDatabaseMetaData.INDEX_TYPE + " " + PUnsignedTinyint.INSTANCE.getSqlTypeName()
+                                            + ", " + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName();
+                                }
+                                
+                                // Ugh..need to assign to another local variable to keep eclipse happy.
+                                PhoenixConnection newMetaConnection = addColumnsIfNotExists(metaConnection,
+                                        PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                        MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
+                                metaConnection = newMetaConnection;
                             }
                             int nSaltBuckets = ConnectionQueryServicesImpl.this.props.getInt(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB,
                                     QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
@@ -1840,20 +1865,34 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 Integer sequenceSaltBuckets = e.getTable().getBucketNum();
                                 nSequenceSaltBuckets = sequenceSaltBuckets == null ? 0 : sequenceSaltBuckets;
                             } catch (TableAlreadyExistsException e) {
-                                // This will occur if we have an older SYSTEM.SEQUENCE, so we need to update it to include
+                                // This will occur if we have an older SYSTEM.SEQUENCE and we need to update it to include
                                 // any new columns we've added.
-                                if (UpgradeUtil.upgradeSequenceTable(metaConnection, nSaltBuckets, e.getTable())) {
-                                    metaConnection.removeTable(null,
-                                            PhoenixDatabaseMetaData.SEQUENCE_SCHEMA_NAME,
-                                            PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
-                                    clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY,
-                                            PhoenixDatabaseMetaData.SEQUENCE_SCHEMA_NAME_BYTES,
-                                            PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME_BYTES,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
-                                    clearTableRegionCache(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
+                                long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
+                                // if the table is at a timestamp corresponding to before 4.2.1 then run the upgrade script
+                                if (currentServerSideTableTimeStamp <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP - 2) {
+                                    if (UpgradeUtil.upgradeSequenceTable(metaConnection, nSaltBuckets, e.getTable())) {
+                                        metaConnection.removeTable(null,
+                                                PhoenixDatabaseMetaData.SEQUENCE_SCHEMA_NAME,
+                                                PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME,
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
+                                        clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY,
+                                                PhoenixDatabaseMetaData.SEQUENCE_SCHEMA_NAME_BYTES,
+                                                PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME_BYTES,
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
+                                        clearTableRegionCache(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
+                                    }
+                                    nSequenceSaltBuckets = nSaltBuckets;
+                                } 
+                                if (currentServerSideTableTimeStamp <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP - 3) {
+                                    // If the table time stamp is before 4.1.0 then we need to add below columns
+                                    // to the SYSTEM.SEQUENCE table.
+                                    String columnsToAdd = PhoenixDatabaseMetaData.MIN_VALUE + " " + PLong.INSTANCE.getSqlTypeName() 
+                                            + ", " + PhoenixDatabaseMetaData.MAX_VALUE + " " + PLong.INSTANCE.getSqlTypeName()
+                                            + ", " + PhoenixDatabaseMetaData.CYCLE_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName()
+                                            + ", " + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName();
+                                    addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
                                 }
-                                nSequenceSaltBuckets = nSaltBuckets;
                             }
                             try {
                                 metaConnection.createStatement().executeUpdate(
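
The upgrade logic above keys off how far the server-side SYSTEM.CATALOG
timestamp lags MIN_SYSTEM_TABLE_TIMESTAMP to decide which columns each release
still needs. A hedged, self-contained sketch of that gating follows; the
constant's value and the SQL type names are placeholders, and the real code
applies the result through addColumnsIfNotExists() rather than printing it.

    final class CatalogUpgradeGate {
        static final long MIN_SYSTEM_TABLE_TIMESTAMP = 7L; // assumed value

        static String columnsToAdd(long serverTimestamp) {
            // Every pre-4.3 server needs STORE_NULLS.
            StringBuilder cols = new StringBuilder("STORE_NULLS BOOLEAN");
            // Four releases behind: also add the 4.1-era index columns.
            if (serverTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP - 4) {
                cols.append(", INDEX_TYPE UNSIGNED_TINYINT")
                    .append(", INDEX_DISABLE_TIMESTAMP BIGINT");
            }
            return cols.toString();
        }

        public static void main(String[] args) {
            System.out.println(columnsToAdd(MIN_SYSTEM_TABLE_TIMESTAMP - 1)); // newer server
            System.out.println(columnsToAdd(MIN_SYSTEM_TABLE_TIMESTAMP - 5)); // 4.0-era server
        }
    }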

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eaa7fbfd/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index a3fee72..a92223b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -242,106 +242,110 @@ public class UpgradeUtil {
                 logger.info("SYSTEM.SEQUENCE table has already been upgraded");
                 return false;
             }
+            
+            // if the SYSTEM.SEQUENCE table is for 4.1.0 or before then we need to salt the table
+            if (oldTable.getTimeStamp() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP - 3) {
+                int batchSizeBytes = 100 * 1024; // 100K chunks
+                int sizeBytes = 0;
+                List<Mutation> mutations =  Lists.newArrayListWithExpectedSize(10000);
 
-            int batchSizeBytes = 100 * 1024; // 100K chunks
-            int sizeBytes = 0;
-            List<Mutation> mutations =  Lists.newArrayListWithExpectedSize(10000);
-    
-            boolean success = false;
-            Scan scan = new Scan();
-            scan.setRaw(true);
-            scan.setMaxVersions(MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS);
-            HTableInterface seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
-            try {
-                boolean committed = false;
-                logger.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
-                ResultScanner scanner = seqTable.getScanner(scan);
+                boolean success = false;
+                Scan scan = new Scan();
+                scan.setRaw(true);
+                scan.setMaxVersions(MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS);
+                HTableInterface seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
                 try {
-                    Result result;
-                     while ((result = scanner.next()) != null) {
-                        for (KeyValue keyValue : result.raw()) {
-                            KeyValue newKeyValue = addSaltByte(keyValue, nSaltBuckets);
-                            if (newKeyValue != null) {
-                                sizeBytes += newKeyValue.getLength();
-                                if (KeyValue.Type.codeToType(newKeyValue.getType()) == KeyValue.Type.Put) {
-                                    // Delete old value
-                                    byte[] buf = keyValue.getBuffer();
-                                    Delete delete = new Delete(keyValue.getRow());
-                                    KeyValue deleteKeyValue = new KeyValue(buf, keyValue.getRowOffset(), keyValue.getRowLength(),
-                                            buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
-                                            buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
-                                            keyValue.getTimestamp(), KeyValue.Type.Delete,
-                                            ByteUtil.EMPTY_BYTE_ARRAY,0,0);
-                                    delete.addDeleteMarker(deleteKeyValue);
-                                    mutations.add(delete);
-                                    sizeBytes += deleteKeyValue.getLength();
-                                    // Put new value
-                                    Put put = new Put(newKeyValue.getRow());
-                                    put.add(newKeyValue);
-                                    mutations.add(put);
-                                } else if (KeyValue.Type.codeToType(newKeyValue.getType()) == KeyValue.Type.Delete){
-                                    // Copy delete marker using new key so that it continues
-                                    // to delete the key value preceding it that will be updated
-                                    // as well.
-                                    Delete delete = new Delete(newKeyValue.getRow());
-                                    delete.addDeleteMarker(newKeyValue);
-                                    mutations.add(delete);
+                    boolean committed = false;
+                    logger.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
+                    ResultScanner scanner = seqTable.getScanner(scan);
+                    try {
+                        Result result;
+                        while ((result = scanner.next()) != null) {
+                            for (KeyValue keyValue : result.raw()) {
+                                KeyValue newKeyValue = addSaltByte(keyValue, nSaltBuckets);
+                                if (newKeyValue != null) {
+                                    sizeBytes += newKeyValue.getLength();
+                                    if (KeyValue.Type.codeToType(newKeyValue.getType()) == KeyValue.Type.Put) {
+                                        // Delete old value
+                                        byte[] buf = keyValue.getBuffer();
+                                        Delete delete = new Delete(keyValue.getRow());
+                                        KeyValue deleteKeyValue = new KeyValue(buf, keyValue.getRowOffset(), keyValue.getRowLength(),
+                                                buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
+                                                buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
+                                                keyValue.getTimestamp(), KeyValue.Type.Delete,
+                                                ByteUtil.EMPTY_BYTE_ARRAY,0,0);
+                                        delete.addDeleteMarker(deleteKeyValue);
+                                        mutations.add(delete);
+                                        sizeBytes += deleteKeyValue.getLength();
+                                        // Put new value
+                                        Put put = new Put(newKeyValue.getRow());
+                                        put.add(newKeyValue);
+                                        mutations.add(put);
+                                    } else if (KeyValue.Type.codeToType(newKeyValue.getType()) == KeyValue.Type.Delete){
+                                        // Copy delete marker using new key so that it continues
+                                        // to delete the key value preceding it that will be updated
+                                        // as well.
+                                        Delete delete = new Delete(newKeyValue.getRow());
+                                        delete.addDeleteMarker(newKeyValue);
+                                        mutations.add(delete);
+                                    }
+                                }
+                                if (sizeBytes >= batchSizeBytes) {
+                                    logger.info("Committing bactch of SYSTEM.SEQUENCE rows");
+                                    seqTable.batch(mutations);
+                                    mutations.clear();
+                                    sizeBytes = 0;
+                                    committed = true;
                                 }
-                            }
-                            if (sizeBytes >= batchSizeBytes) {
-                                logger.info("Committing bactch of SYSTEM.SEQUENCE rows");
-                                seqTable.batch(mutations);
-                                mutations.clear();
-                                sizeBytes = 0;
-                                committed = true;
                             }
                         }
-                    }
-                    if (!mutations.isEmpty()) {
-                        logger.info("Committing last bactch of SYSTEM.SEQUENCE rows");
-                        seqTable.batch(mutations);
-                    }
-                    preSplitSequenceTable(conn, nSaltBuckets);
-                    logger.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
-                    success = true;
-                    return true;
-                } catch (InterruptedException e) {
-                    throw ServerUtil.parseServerException(e);
-                } finally {
-                    try {
-                        scanner.close();
+                        if (!mutations.isEmpty()) {
+                            logger.info("Committing last bactch of SYSTEM.SEQUENCE rows");
+                            seqTable.batch(mutations);
+                        }
+                        preSplitSequenceTable(conn, nSaltBuckets);
+                        logger.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
+                        success = true;
+                        return true;
+                    } catch (InterruptedException e) {
+                        throw ServerUtil.parseServerException(e);
                     } finally {
-                        if (!success) {
-                            if (!committed) { // Try to recover by setting salting back to off, as we haven't successfully committed anything
-                                // Don't use Delete here as we'd never be able to change it again at this timestamp.
-                                KeyValue unsaltKV = KeyValueUtil.newKeyValue(seqTableKey, 
-                                        PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
-                                        PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
-                                        MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
-                                        PInteger.INSTANCE.toBytes(0));
-                                Put unsaltPut = new Put(seqTableKey);
-                                unsaltPut.add(unsaltKV);
-                                try {
-                                    sysTable.put(unsaltPut);
-                                    success = true;
-                                } finally {
-                                    if (!success) logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
+                        try {
+                            scanner.close();
+                        } finally {
+                            if (!success) {
+                                if (!committed) { // Try to recover by setting salting back to off, as we haven't successfully committed anything
+                                    // Don't use Delete here as we'd never be able to change it again at this timestamp.
+                                    KeyValue unsaltKV = KeyValueUtil.newKeyValue(seqTableKey, 
+                                            PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                            PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
+                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
+                                            PInteger.INSTANCE.toBytes(0));
+                                    Put unsaltPut = new Put(seqTableKey);
+                                    unsaltPut.add(unsaltKV);
+                                    try {
+                                        sysTable.put(unsaltPut);
+                                        success = true;
+                                    } finally {
+                                        if (!success) logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
+                                    }
+                                } else { // We're screwed b/c we've already committed some salted sequences...
+                                    logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                                 }
-                            } else { // We're screwed b/c we've already committed some salted sequences...
-                                logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                             }
                         }
                     }
-                }
-            } catch (IOException e) {
-                throw ServerUtil.parseServerException(e);
-            } finally {
-                try {
-                    seqTable.close();
                 } catch (IOException e) {
-                    logger.warn("Exception during close",e);
+                    throw ServerUtil.parseServerException(e);
+                } finally {
+                    try {
+                        seqTable.close();
+                    } catch (IOException e) {
+                        logger.warn("Exception during close",e);
+                    }
                 }
             }
+            return false;
         } catch (IOException e) {
             throw ServerUtil.parseServerException(e);
         } finally {
@@ -351,6 +355,7 @@ public class UpgradeUtil {
                 logger.warn("Exception during close",e);
             }
         }
+        
     }
     
     @SuppressWarnings("deprecation")
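
The re-indented loop above preserves the original size-bounded batching:
mutations accumulate until roughly 100K bytes, are flushed with HTable.batch(),
and any remainder is flushed at the end. A hedged standalone sketch of that
pattern against the HBase 0.98-era client API, with the salting delete/re-put
logic and error recovery elided.

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;

    final class BatchedSequenceWriter {
        private static final int BATCH_SIZE_BYTES = 100 * 1024; // 100K chunks, as above

        // Flush mutations in size-bounded chunks; callers own retries/cleanup.
        static void writeAll(HTableInterface table, Iterable<Put> puts) throws Exception {
            List<Mutation> pending = new ArrayList<Mutation>();
            long sizeBytes = 0;
            for (Put put : puts) {
                pending.add(put);
                sizeBytes += put.heapSize(); // rough stand-in for serialized length
                if (sizeBytes >= BATCH_SIZE_BYTES) {
                    table.batch(pending);    // commit this chunk
                    pending.clear();
                    sizeBytes = 0;
                }
            }
            if (!pending.isEmpty()) {
                table.batch(pending);        // commit the final partial chunk
            }
        }
    }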


[37/50] [abbrv] phoenix git commit: PHOENIX-1680 phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java

Posted by ma...@apache.org.
PHOENIX-1680 phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/05723b19
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/05723b19
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/05723b19

Branch: refs/heads/calcite
Commit: 05723b19cc107b72d9f752cd0441af260fc62e22
Parents: 3d50147
Author: maryannxue <we...@intel.com>
Authored: Wed Feb 25 17:33:41 2015 -0500
Committer: maryannxue <we...@intel.com>
Committed: Wed Feb 25 17:33:41 2015 -0500

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 90 ++++++++++++++++++++
 .../apache/phoenix/end2end/SortMergeJoinIT.java | 78 ++++++++++++++++-
 .../apache/phoenix/compile/QueryCompiler.java   | 37 ++++----
 3 files changed, 188 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/05723b19/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index 03686f0..e915b36 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -41,6 +41,7 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.sql.Timestamp;
 import java.text.SimpleDateFormat;
 import java.util.Collection;
@@ -464,6 +465,21 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    DYNAMIC SERVER FILTER BY (COL0, COL1, COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
+                /*
+                 * testJoinWithSetMaxRows()
+                 *     statement.setMaxRows(4);
+                 *     SELECT order_id, i.name, quantity FROM joinItemTable i
+                 *     JOIN joinOrderTable o ON o.item_id = i.item_id;
+                 *     SELECT o.order_id, i.name, o.quantity FROM joinItemTable i
+                 *     JOIN (SELECT order_id, item_id, quantity FROM joinOrderTable) o
+                 *     ON o.item_id = i.item_id;
+                 */
+                "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
+                "CLIENT 4 ROW LIMIT\n" +
+                "    PARALLEL INNER-JOIN TABLE 0\n" +
+                "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")\n" +
+                "    JOIN-SCANNER 4 ROW LIMIT",
                 }});
         testCases.add(new String[][] {
                 {
@@ -831,6 +847,21 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    DYNAMIC SERVER FILTER BY (COL0, COL1, COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
+                /*
+                 * testJoinWithSetMaxRows()
+                 *     statement.setMaxRows(4);
+                 *     SELECT order_id, i.name, quantity FROM joinItemTable i
+                 *     JOIN joinOrderTable o ON o.item_id = i.item_id;
+                 *     SELECT o.order_id, i.name, o.quantity FROM joinItemTable i
+                 *     JOIN (SELECT order_id, item_id, quantity FROM joinOrderTable) o
+                 *     ON o.item_id = i.item_id;
+                 */
+                "CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_SCHEMA + ".idx_item\n" +
+                "    SERVER FILTER BY FIRST KEY ONLY\n" +
+                "CLIENT 4 ROW LIMIT\n" +
+                "    PARALLEL INNER-JOIN TABLE 0\n" +
+                "        CLIENT PARALLEL 1-WAY FULL SCAN OVER "+ JOIN_SCHEMA + ".OrderTable\n" +
+                "    JOIN-SCANNER 4 ROW LIMIT",
                 }});
         testCases.add(new String[][] {
                 {
@@ -1221,6 +1252,23 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 4-WAY FULL SCAN OVER TEMP_TABLE_COMPOSITE_PK\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    DYNAMIC SERVER FILTER BY (COL0, COL1, COL2) IN ((RHS.COL1, RHS.COL2, TO_INTEGER((RHS.COL3 - 1))))",
+                /*
+                 * testJoinWithSetMaxRows()
+                 *     statement.setMaxRows(4);
+                 *     SELECT order_id, i.name, quantity FROM joinItemTable i
+                 *     JOIN joinOrderTable o ON o.item_id = i.item_id;
+                 *     SELECT o.order_id, i.name, o.quantity FROM joinItemTable i
+                 *     JOIN (SELECT order_id, item_id, quantity FROM joinOrderTable) o
+                 *     ON o.item_id = i.item_id;
+                 */
+                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
+                "    SERVER FILTER BY FIRST KEY ONLY\n" +
+                "CLIENT MERGE SORT\n" +
+                "CLIENT 4 ROW LIMIT\n" +
+                "    PARALLEL INNER-JOIN TABLE 0\n" +
+                "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
+                "    DYNAMIC SERVER FILTER BY \"item_id\" IN (\"O.item_id\")\n" +
+                "    JOIN-SCANNER 4 ROW LIMIT",
                 }});
         return testCases;
     }
@@ -3734,6 +3782,48 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
         }
     }
 
+    @Test
+    public void testJoinWithSetMaxRows() throws Exception {
+        String [] queries = new String[2];
+        queries[0] = "SELECT \"order_id\", i.name, quantity FROM " + JOIN_ITEM_TABLE_FULL_NAME + " i JOIN "
+                + JOIN_ORDER_TABLE_FULL_NAME + " o ON o.\"item_id\" = i.\"item_id\"";
+        queries[1] = "SELECT o.\"order_id\", i.name, o.quantity FROM " + JOIN_ITEM_TABLE_FULL_NAME + " i JOIN " 
+                + "(SELECT \"order_id\", \"item_id\", quantity FROM " + JOIN_ORDER_TABLE_FULL_NAME + ") o " 
+                + "ON o.\"item_id\" = i.\"item_id\"";
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            for (String query : queries) {
+                Statement statement = conn.createStatement();
+                statement.setMaxRows(4);
+                ResultSet rs = statement.executeQuery(query);
+                assertTrue (rs.next());
+                assertEquals(rs.getString(1), "000000000000001");
+                assertEquals(rs.getString(2), "T1");
+                assertEquals(rs.getInt(3), 1000);
+                assertTrue (rs.next());
+                assertEquals(rs.getString(1), "000000000000003");
+                assertEquals(rs.getString(2), "T2");
+                assertEquals(rs.getInt(3), 3000);
+                assertTrue (rs.next());
+                assertEquals(rs.getString(1), "000000000000005");
+                assertEquals(rs.getString(2), "T3");
+                assertEquals(rs.getInt(3), 5000);
+                assertTrue (rs.next());
+                assertEquals(rs.getString(1), "000000000000002");
+                assertEquals(rs.getString(2), "T6");
+                assertEquals(rs.getInt(3), 2000);
+
+                assertFalse(rs.next());
+                
+                rs = statement.executeQuery("EXPLAIN " + query);
+                assertEquals(plans[25], QueryUtil.getExplainPlan(rs));
+            }
+        } finally {
+            conn.close();
+        }
+    }
+
 }
 
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/05723b19/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
index 7912803..6f14a45 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
@@ -40,6 +40,7 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.sql.Timestamp;
 import java.text.SimpleDateFormat;
 import java.util.Collection;
@@ -120,6 +121,14 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            SERVER SORTED BY [\"O.item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "    CLIENT SORTED BY [\"I.supplier_id\"]",
+                
+                "SORT-MERGE-JOIN (INNER) TABLES\n" +
+                "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
+                "AND\n" +
+                "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
+                "        SERVER SORTED BY [\"O.item_id\"]\n" +
+                "    CLIENT MERGE SORT\n" +
+                "CLIENT 4 ROW LIMIT",
                 }});
         testCases.add(new String[][] {
                 {
@@ -142,7 +151,18 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            SERVER FILTER BY QUANTITY < 5000\n" +
                 "            SERVER SORTED BY [\"O.item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [\"I.0:supplier_id\"]"
+                "    CLIENT SORTED BY [\"I.0:supplier_id\"]",
+                
+                "SORT-MERGE-JOIN (INNER) TABLES\n" +
+                "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
+                "        SERVER FILTER BY FIRST KEY ONLY\n" +
+                "        SERVER SORTED BY [\"I.:item_id\"]\n" +
+                "    CLIENT MERGE SORT\n" +
+                "AND\n" +
+                "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
+                "        SERVER SORTED BY [\"O.item_id\"]\n" +
+                "    CLIENT MERGE SORT\n" +
+                "CLIENT 4 ROW LIMIT",
                 }});
         testCases.add(new String[][] {
                 {
@@ -165,7 +185,18 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "            SERVER FILTER BY QUANTITY < 5000\n" +
                 "            SERVER SORTED BY [\"O.item_id\"]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [\"I.0:supplier_id\"]"
+                "    CLIENT SORTED BY [\"I.0:supplier_id\"]",
+                
+                "SORT-MERGE-JOIN (INNER) TABLES\n" +
+                "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
+                "        SERVER FILTER BY FIRST KEY ONLY\n" +
+                "        SERVER SORTED BY [\"I.:item_id\"]\n" +
+                "    CLIENT MERGE SORT\n" +
+                "AND\n" +
+                "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
+                "        SERVER SORTED BY [\"O.item_id\"]\n" +
+                "    CLIENT MERGE SORT\n" +
+                "CLIENT 4 ROW LIMIT",
                 }});
         return testCases;
     }
@@ -2584,5 +2615,48 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
         }
     }
 
+    @Test
+    public void testJoinWithSetMaxRows() throws Exception {
+        String [] queries = new String[2];
+        queries[0] = "SELECT /*+ USE_SORT_MERGE_JOIN*/ \"order_id\", i.name, quantity FROM " + JOIN_ITEM_TABLE_FULL_NAME + " i JOIN "
+                + JOIN_ORDER_TABLE_FULL_NAME + " o ON o.\"item_id\" = i.\"item_id\"";
+        queries[1] = "SELECT /*+ USE_SORT_MERGE_JOIN*/ o.\"order_id\", i.name, o.quantity FROM " + JOIN_ITEM_TABLE_FULL_NAME + " i JOIN " 
+                + "(SELECT \"order_id\", \"item_id\", quantity FROM " + JOIN_ORDER_TABLE_FULL_NAME + ") o " 
+                + "ON o.\"item_id\" = i.\"item_id\"";
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            for (int i = 0; i < queries.length; i++) {
+                String query = queries[i];
+                Statement statement = conn.createStatement();
+                statement.setMaxRows(4);
+                ResultSet rs = statement.executeQuery(query);
+                assertTrue (rs.next());
+                assertEquals(rs.getString(1), "000000000000001");
+                assertEquals(rs.getString(2), "T1");
+                assertEquals(rs.getInt(3), 1000);
+                assertTrue (rs.next());
+                assertEquals(rs.getString(1), "000000000000003");
+                assertEquals(rs.getString(2), "T2");
+                assertEquals(rs.getInt(3), 3000);
+                assertTrue (rs.next());
+                assertEquals(rs.getString(1), "000000000000005");
+                assertEquals(rs.getString(2), "T3");
+                assertEquals(rs.getInt(3), 5000);
+                assertTrue (rs.next());
+                assertTrue(rs.getString(1).equals("000000000000002") || rs.getString(1).equals("000000000000004"));
+                assertEquals(rs.getString(2), "T6");
+                assertTrue(rs.getInt(3) == 2000 || rs.getInt(3) == 4000);
+
+                assertFalse(rs.next());
+                
+                rs = statement.executeQuery("EXPLAIN " + query);
+                assertEquals(i == 0 ? plans[1] : plans[1].replaceFirst("O\\.item_id", "item_id"), QueryUtil.getExplainPlan(rs));
+            }
+        } finally {
+            conn.close();
+        }
+    }
+
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/05723b19/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index 9642489..137f4e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -277,8 +277,8 @@ public class QueryCompiler {
             QueryPlan plan = compileSingleQuery(context, query, binds, asSubquery, !asSubquery && joinTable.isAllLeftJoin());
             Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, table);
             Integer limit = null;
-            if (query.getLimit() != null && !query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
-                limit = LimitCompiler.compile(context, query);
+            if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
+                limit = plan.getLimit();
             }
             HashJoinInfo joinInfo = new HashJoinInfo(projectedTable.getTable(), joinIds, joinExpressions, joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, limit, forceProjection);
             return HashJoinPlan.create(joinTable.getStatement(), plan, joinInfo, subPlans);
@@ -333,8 +333,8 @@ public class QueryCompiler {
             QueryPlan rhsPlan = compileSingleQuery(context, rhs, binds, asSubquery, !asSubquery && type == JoinType.Right);
             Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, rhsTable);
             Integer limit = null;
-            if (rhs.getLimit() != null && !rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
-                limit = LimitCompiler.compile(context, rhs);
+            if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
+                limit = rhsPlan.getLimit();
             }
             HashJoinInfo joinInfo = new HashJoinInfo(projectedTable.getTable(), joinIds, new List[] {joinExpressions}, new JoinType[] {type == JoinType.Right ? JoinType.Left : type}, new boolean[] {true}, new PTable[] {lhsTable}, new int[] {fieldPosition}, postJoinFilterExpression, limit, forceProjection);
             Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
@@ -429,16 +429,21 @@ public class QueryCompiler {
     }
 
     protected QueryPlan compileSubquery(SelectStatement subquery) throws SQLException {
-        subquery = SubselectRewriter.flatten(subquery, this.statement.getConnection());
-        ColumnResolver resolver = FromCompiler.getResolverForQuery(subquery, this.statement.getConnection());
+        PhoenixConnection connection = this.statement.getConnection();
+        subquery = SubselectRewriter.flatten(subquery, connection);
+        ColumnResolver resolver = FromCompiler.getResolverForQuery(subquery, connection);
         subquery = StatementNormalizer.normalize(subquery, resolver);
-        SelectStatement transformedSubquery = SubqueryRewriter.transform(subquery, resolver, this.statement.getConnection());
+        SelectStatement transformedSubquery = SubqueryRewriter.transform(subquery, resolver, connection);
         if (transformedSubquery != subquery) {
-            resolver = FromCompiler.getResolverForQuery(transformedSubquery, this.statement.getConnection());
+            resolver = FromCompiler.getResolverForQuery(transformedSubquery, connection);
             subquery = StatementNormalizer.normalize(transformedSubquery, resolver);
         }
+        int maxRows = this.statement.getMaxRows();
+        this.statement.setMaxRows(0); // overwrite maxRows to avoid its impact on inner queries.
         QueryPlan plan = new QueryCompiler(this.statement, subquery, resolver).compile();
-        return statement.getConnection().getQueryServices().getOptimizer().optimize(statement, plan);
+        plan = statement.getConnection().getQueryServices().getOptimizer().optimize(statement, plan);
+        this.statement.setMaxRows(maxRows); // restore maxRows.
+        return plan;
     }
 
     protected QueryPlan compileSingleQuery(StatementContext context, SelectStatement select, List<Object> binds, boolean asSubquery, boolean allowPageFilter) throws SQLException{
@@ -490,12 +495,14 @@ public class QueryCompiler {
         RowProjector projector = ProjectionCompiler.compile(context, select, groupBy, asSubquery ? Collections.<PDatum>emptyList() : targetColumns);
 
         // Final step is to build the query plan
-        int maxRows = statement.getMaxRows();
-        if (maxRows > 0) {
-            if (limit != null) {
-                limit = Math.min(limit, maxRows);
-            } else {
-                limit = maxRows;
+        if (!asSubquery) {
+            int maxRows = statement.getMaxRows();
+            if (maxRows > 0) {
+                if (limit != null) {
+                    limit = Math.min(limit, maxRows);
+                } else {
+                    limit = maxRows;
+                }
             }
         }
 


[27/50] [abbrv] phoenix git commit: PHOENIX-1649 Remove System.out/System.err debugging messages (Rajeshbabu)

Posted by ma...@apache.org.
PHOENIX-1649 Remove System.out/System.err debugging messages (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7dc3d842
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7dc3d842
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7dc3d842

Branch: refs/heads/calcite
Commit: 7dc3d84218bdcc9a9d408283f3badab9230b1ad9
Parents: acb8799
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Feb 10 09:50:42 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Feb 10 09:50:42 2015 -0800

----------------------------------------------------------------------
 .../wal/WALReplayWithIndexWritesAndCompressedWALIT.java           | 2 +-
 .../src/main/java/org/apache/phoenix/schema/MetaDataClient.java   | 3 ---
 .../test/java/org/apache/phoenix/schema/types/PDataTypeTest.java  | 1 -
 .../src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java  | 3 +--
 4 files changed, 2 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7dc3d842/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index b3a980e..8cf8a8a 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -293,7 +293,7 @@ private int getKeyValueCount(HTable table) throws IOException {
     int count = 0;
     for (Result res : results) {
       count += res.list().size();
-      System.out.println(count + ") " + res);
+      LOG.debug(count + ") " + res);
     }
     results.close();
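
The replacement above swaps an unconditional System.out.println for the
test's logger. With commons-logging, an isDebugEnabled() guard also
avoids building the message string when debug output is disabled; a
minimal sketch, assuming nothing beyond the commons-logging API (the
class name is hypothetical):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class GuardedLoggingSketch {
        private static final Log LOG = LogFactory.getLog(GuardedLoggingSketch.class);

        void report(int count, Object res) {
            // Skip the string concatenation entirely when DEBUG is off.
            if (LOG.isDebugEnabled()) {
                LOG.debug(count + ") " + res);
            }
        }
    }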
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7dc3d842/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 2722cb6..61ee081 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -332,9 +332,6 @@ public class MetaDataClient {
             table = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
             tableTimestamp = table.getTimeStamp();
         } catch (TableNotFoundException e) {
-            System.err.println(e);
-            // TODO: Try again on services cache, as we may be looking for
-            // a global multi-tenant table
         }
         // Don't bother with server call: we can't possibly find a newer table
         if (table != null && !alwaysHitServer && (systemTable || tableTimestamp == clientTimeStamp - 1)) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7dc3d842/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
index 90730bc..b2e6e0b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
@@ -1011,7 +1011,6 @@ public class PDataTypeTest {
                 byte[] bytes = Bytes.toBytesBinary(str);
                 Object o = PDecimal.INSTANCE.toObject(bytes);
                 assertNotNull(o);
-                //System.out.println(o.getClass() +" " + bytesToHex(bytes)+" " + o+" ");
             }
         }
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7dc3d842/phoenix-pig/src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java b/phoenix-pig/src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java
index 51b2478..594abe6 100644
--- a/phoenix-pig/src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java
+++ b/phoenix-pig/src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java
@@ -520,8 +520,7 @@ public class PhoenixHBaseLoaderIT {
         Iterator<Tuple> iterator = pigServer.openIterator("A");
         int recordsRead = 0;
         while (iterator.hasNext()) {
-            Tuple tuple = iterator.next();
-            System.out.println(" the field value is "+tuple.get(1));
+            iterator.next();
             recordsRead++;
         }
         assertEquals(rows/2, recordsRead);


[04/50] [abbrv] phoenix git commit: PHOENIX-1616 Creating a View with a case sensitive column name does not work (Thomas D'Silva)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a5d7ef/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 72c67bf..9ab0692 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -48,18 +48,18 @@ import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnFamily;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.types.PVarbinary;
-import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.schema.types.PVarchar;
 
 import com.google.common.base.Preconditions;
 
@@ -174,7 +174,7 @@ public class SchemaUtil {
     }
 
     public static boolean isCaseSensitive(String name) {
-        return name.length() > 0 && name.charAt(0)=='"';
+        return name!=null && name.length() > 0 && name.charAt(0)=='"';
     }
     
     public static <T> List<T> concat(List<T> l1, List<T> l2) {
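
The added null check makes isCaseSensitive() safe to call with a null
name (returning false) instead of throwing a NullPointerException. A
quick JUnit sketch of the resulting contract (the test class name is
hypothetical):

    import static org.junit.Assert.assertFalse;
    import static org.junit.Assert.assertTrue;

    import org.apache.phoenix.util.SchemaUtil;
    import org.junit.Test;

    public class IsCaseSensitiveSketchTest {
        @Test
        public void nullAndUnquotedNamesAreNotCaseSensitive() {
            assertFalse(SchemaUtil.isCaseSensitive(null));      // no NPE after the fix
            assertFalse(SchemaUtil.isCaseSensitive(""));
            assertFalse(SchemaUtil.isCaseSensitive("NAME"));    // unquoted identifier
            assertTrue(SchemaUtil.isCaseSensitive("\"name\"")); // quoted identifier
        }
    }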


[11/50] [abbrv] phoenix git commit: PHOENIX-514 Support functional indexes (Thomas D'Silva)

Posted by ma...@apache.org.
PHOENIX-514 Support functional indexes (Thomas D'Silva)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8c340f5a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8c340f5a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8c340f5a

Branch: refs/heads/calcite
Commit: 8c340f5a6c030d009a4fa75444096a1999aea5c5
Parents: d6e7846
Author: James Taylor <ja...@apache.org>
Authored: Wed Feb 4 12:30:51 2015 -0800
Committer: James Taylor <ja...@apache.org>
Committed: Wed Feb 4 12:30:51 2015 -0800

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/HashJoinIT.java  |  12 +-
 .../apache/phoenix/end2end/PercentileIT.java    |   7 +-
 .../java/org/apache/phoenix/end2end/ViewIT.java |   2 +-
 .../phoenix/end2end/index/ImmutableIndexIT.java |  76 +-
 .../end2end/index/IndexExpressionIT.java        | 866 +++++++++++++++++++
 .../phoenix/end2end/index/IndexMetadataIT.java  |  20 +-
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |  32 +-
 .../IndexHalfStoreFileReaderGenerator.java      |   4 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |   4 +-
 .../phoenix/compile/ExpressionCompiler.java     |   9 +-
 .../apache/phoenix/compile/FromCompiler.java    |   6 +-
 .../apache/phoenix/compile/HavingCompiler.java  |   4 +-
 .../compile/IndexExpressionCompiler.java        |  53 ++
 .../apache/phoenix/compile/JoinCompiler.java    |   8 +-
 .../phoenix/compile/PostIndexDDLCompiler.java   |  47 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   2 +-
 .../apache/phoenix/compile/WhereCompiler.java   |   4 +-
 .../coprocessor/MetaDataEndpointImpl.java       |  44 +-
 .../UngroupedAggregateRegionObserver.java       |   3 +-
 .../coprocessor/generated/PTableProtos.java     | 215 ++++-
 .../phoenix/exception/SQLExceptionCode.java     |   6 +
 .../apache/phoenix/execute/BaseQueryPlan.java   |   2 +-
 .../apache/phoenix/execute/MutationState.java   |   4 +-
 .../phoenix/expression/CoerceExpression.java    |  10 +-
 .../expression/RowKeyColumnExpression.java      |   6 +-
 .../apache/phoenix/hbase/index/ValueGetter.java |   2 +
 .../index/covered/data/LazyValueGetter.java     |   5 +
 .../hbase/index/util/IndexManagementUtil.java   |  18 -
 .../apache/phoenix/index/IndexMaintainer.java   | 333 ++++---
 .../apache/phoenix/index/PhoenixIndexCodec.java |   2 +-
 .../index/PhoenixIndexFailurePolicy.java        |  21 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   1 +
 .../apache/phoenix/jdbc/PhoenixStatement.java   |   9 +-
 .../apache/phoenix/optimize/QueryOptimizer.java |  12 +-
 .../apache/phoenix/parse/BetweenParseNode.java  |  22 +
 .../org/apache/phoenix/parse/BindParseNode.java |  22 +
 .../org/apache/phoenix/parse/CastParseNode.java |  38 +
 .../org/apache/phoenix/parse/ColumnDef.java     |  14 +-
 .../org/apache/phoenix/parse/ColumnName.java    |   2 +-
 .../apache/phoenix/parse/ColumnParseNode.java   |   1 +
 .../apache/phoenix/parse/CompoundParseNode.java |  32 +-
 .../phoenix/parse/CreateIndexStatement.java     |  10 +-
 .../apache/phoenix/parse/ExistsParseNode.java   |  22 +
 .../phoenix/parse/FamilyWildcardParseNode.java  |  22 +
 .../apache/phoenix/parse/FunctionParseNode.java |  31 +
 .../apache/phoenix/parse/InListParseNode.java   |  22 +
 .../org/apache/phoenix/parse/InParseNode.java   |  25 +
 .../parse/IndexExpressionParseNodeRewriter.java | 104 +++
 .../phoenix/parse/IndexKeyConstraint.java       |  12 +-
 .../apache/phoenix/parse/IsNullParseNode.java   |  22 +
 .../org/apache/phoenix/parse/LikeParseNode.java |  26 +
 .../apache/phoenix/parse/LiteralParseNode.java  |  21 +
 .../org/apache/phoenix/parse/NamedNode.java     |   2 +-
 .../apache/phoenix/parse/NamedParseNode.java    |  30 +
 .../apache/phoenix/parse/ParseNodeFactory.java  |  34 +-
 .../phoenix/parse/SequenceValueParseNode.java   |  29 +
 .../apache/phoenix/parse/SubqueryParseNode.java |  28 +
 .../org/apache/phoenix/parse/TableName.java     |   3 +-
 .../phoenix/parse/TableWildcardParseNode.java   |  29 +
 .../apache/phoenix/parse/WildcardParseNode.java |  24 +-
 .../apache/phoenix/schema/DelegateColumn.java   |   6 +
 .../apache/phoenix/schema/DelegateTable.java    |   9 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 173 ++--
 .../java/org/apache/phoenix/schema/PColumn.java |   3 +
 .../org/apache/phoenix/schema/PColumnImpl.java  |  26 +-
 .../apache/phoenix/schema/PMetaDataImpl.java    |   2 +-
 .../java/org/apache/phoenix/schema/PTable.java  |   7 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  27 +-
 .../org/apache/phoenix/schema/SaltingUtil.java  |   2 +-
 .../phoenix/schema/tuple/ValueGetterTuple.java  |  93 ++
 .../java/org/apache/phoenix/util/IndexUtil.java |  15 +-
 .../expression/ColumnExpressionTest.java        |   8 +-
 .../phoenix/index/IndexMaintainerTest.java      |  14 +-
 .../iterate/AggregateResultScannerTest.java     |   5 +
 .../java/org/apache/phoenix/query/BaseTest.java |  26 +-
 phoenix-protocol/src/main/PTable.proto          |   1 +
 76 files changed, 2475 insertions(+), 418 deletions(-)
----------------------------------------------------------------------
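
Functional (expression) indexes, exercised at length by the new
IndexExpressionIT below, index the value of an expression rather than a
plain column, so queries that filter, group, or order by that
expression can be served from the index. A minimal usage sketch (the
connection URL and the names T and UPPER_NAME_IDX are hypothetical
placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class FunctionalIndexSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                    "CREATE TABLE IF NOT EXISTS T (ID BIGINT PRIMARY KEY, NAME VARCHAR)");
                // Index the expression UPPER(NAME) instead of the raw column.
                conn.createStatement().execute(
                    "CREATE INDEX IF NOT EXISTS UPPER_NAME_IDX ON T (UPPER(NAME))");
                // The WHERE clause matches the indexed expression, so the
                // optimizer can answer this query from UPPER_NAME_IDX.
                ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT ID FROM T WHERE UPPER(NAME) = 'ALICE'");
                while (rs.next()) {
                    System.out.println(rs.getLong(1));
                }
            }
        }
    }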


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index 781bfea..76eab22 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -289,7 +289,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER FILTER BY order_id != '000000000000003'\n" +
+                "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER FILTER BY NAME != 'T3'\n" +
@@ -380,7 +380,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER FILTER BY order_id != '000000000000003'\n" +
+                "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER FILTER BY NAME != 'T3'\n" +
@@ -655,7 +655,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER FILTER BY order_id != '000000000000003'\n" +
+                "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "                    SERVER FILTER BY NAME != 'T3'\n" +
@@ -747,7 +747,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER FILTER BY order_id != '000000000000003'\n" +
+                "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "                    SERVER FILTER BY NAME != 'T3'\n" +
@@ -1039,7 +1039,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER FILTER BY order_id != '000000000000003'\n" +
+                "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+ JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n" +
                 "                    SERVER FILTER BY NAME != 'T3'\n" +
@@ -1134,7 +1134,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT MERGE SORT\n" +
                 "    PARALLEL INNER-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
-                "            SERVER FILTER BY order_id != '000000000000003'\n" +
+                "            SERVER FILTER BY \"order_id\" != '000000000000003'\n" +
                 "            PARALLEL INNER-JOIN TABLE 0\n" +
                 "                CLIENT PARALLEL 1-WAY RANGE SCAN OVER " +  MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+JOIN_ITEM_TABLE_DISPLAY_NAME + " [-32768]\n" +
                 "                    SERVER FILTER BY NAME != 'T3'\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PercentileIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PercentileIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PercentileIT.java
index 685daeb..8109694 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PercentileIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PercentileIT.java
@@ -50,6 +50,7 @@ import java.sql.Types;
 import java.util.Properties;
 
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -516,15 +517,17 @@ public class PercentileIT extends BaseClientManagedTimeIT {
     private static void populateINDEX_DATA_TABLETable() throws SQLException {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
+        Date date = DateUtil.parseDate("2015-01-01 00:00:00");
         try {
             String upsert = "UPSERT INTO " + INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + INDEX_DATA_TABLE
-                    + " VALUES(?, ?, ?, ?, ?)";
+                    + " VALUES(?, ?, ?, ?, ?, ?)";
             PreparedStatement stmt = conn.prepareStatement(upsert);
             stmt.setString(1, "varchar1");
             stmt.setString(2, "char1");
             stmt.setInt(3, 1);
             stmt.setLong(4, 1L);
             stmt.setBigDecimal(5, new BigDecimal(1.0));
+            stmt.setDate(6, date);
             stmt.executeUpdate();
             
             stmt.setString(1, "varchar2");
@@ -532,6 +535,7 @@ public class PercentileIT extends BaseClientManagedTimeIT {
             stmt.setInt(3, 2);
             stmt.setLong(4, 2L);
             stmt.setBigDecimal(5, new BigDecimal(2.0));
+            stmt.setDate(6, date);
             stmt.executeUpdate();
             
             stmt.setString(1, "varchar3");
@@ -539,6 +543,7 @@ public class PercentileIT extends BaseClientManagedTimeIT {
             stmt.setInt(3, 3);
             stmt.setLong(4, 3L);
             stmt.setBigDecimal(5, new BigDecimal(3.0));
+            stmt.setDate(6, date);
             stmt.executeUpdate();
             
             conn.commit();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index aa26f9b..9a89531 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -424,6 +424,6 @@ public class ViewIT extends BaseViewIT {
         String queryPlan = QueryUtil.getExplainPlan(rs);
         assertEquals(
                 "CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER I1 [1,100] - [2,109]\n" + 
-                "    SERVER FILTER BY (S2 = 'bas' AND S1 = 'foo')", queryPlan);
+                "    SERVER FILTER BY (S2 = 'bas' AND \"S1\" = 'foo')", queryPlan);
     }
 }
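
The quoting changes in these explain plans ("order_id", "S1") follow
Phoenix's identifier rules: unquoted identifiers are normalized to
upper case, while double-quoted identifiers preserve their case, and
the plan output now renders such column references quoted. A tiny
sketch of the rule (the table name Q is a hypothetical placeholder,
and conn is an open Phoenix connection):

    import java.sql.Connection;
    import java.sql.ResultSet;

    class IdentifierCaseSketch {
        static ResultSet demo(Connection conn) throws Exception {
            conn.createStatement().execute(
                "CREATE TABLE IF NOT EXISTS Q (\"order_id\" VARCHAR PRIMARY KEY, NAME VARCHAR)");
            // The lower-case column must be quoted; unquoted order_id
            // would be normalized to ORDER_ID and not be found.
            return conn.createStatement().executeQuery(
                "SELECT \"order_id\" FROM Q WHERE \"order_id\" != '000000000000003'");
        }
    }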

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index 55b38a5..9eb9a57 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
 
 import java.math.BigDecimal;
 import java.sql.Connection;
+import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -38,6 +39,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.junit.Test;
@@ -50,23 +52,27 @@ public class ImmutableIndexIT extends BaseHBaseManagedTimeIT {
         Connection conn = DriverManager.getConnection(getUrl(), props);
         try {
             String upsert = "UPSERT INTO " + INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + INDEX_DATA_TABLE
-                    + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+                    + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
             PreparedStatement stmt = conn.prepareStatement(upsert);
             stmt.setString(1, "varchar1");
             stmt.setString(2, "char1");
             stmt.setInt(3, 1);
             stmt.setLong(4, 1L);
             stmt.setBigDecimal(5, new BigDecimal(1.0));
-            stmt.setString(6, "varchar_a");
-            stmt.setString(7, "chara");
-            stmt.setInt(8, 2);
-            stmt.setLong(9, 2L);
-            stmt.setBigDecimal(10, new BigDecimal(2.0));
-            stmt.setString(11, "varchar_b");
-            stmt.setString(12, "charb");
-            stmt.setInt(13, 3);
-            stmt.setLong(14, 3L);
-            stmt.setBigDecimal(15, new BigDecimal(3.0));
+            Date date = DateUtil.parseDate("2015-01-01 00:00:00");
+            stmt.setDate(6, date);
+            stmt.setString(7, "varchar_a");
+            stmt.setString(8, "chara");
+            stmt.setInt(9, 2);
+            stmt.setLong(10, 2L);
+            stmt.setBigDecimal(11, new BigDecimal(2.0));
+            stmt.setDate(12, date);
+            stmt.setString(13, "varchar_b");
+            stmt.setString(14, "charb");
+            stmt.setInt(15, 3);
+            stmt.setLong(16, 3L);
+            stmt.setBigDecimal(17, new BigDecimal(3.0));
+            stmt.setDate(18, date);
             stmt.executeUpdate();
             
             stmt.setString(1, "varchar2");
@@ -74,16 +80,20 @@ public class ImmutableIndexIT extends BaseHBaseManagedTimeIT {
             stmt.setInt(3, 2);
             stmt.setLong(4, 2L);
             stmt.setBigDecimal(5, new BigDecimal(2.0));
-            stmt.setString(6, "varchar_a");
-            stmt.setString(7, "chara");
-            stmt.setInt(8, 3);
-            stmt.setLong(9, 3L);
-            stmt.setBigDecimal(10, new BigDecimal(3.0));
-            stmt.setString(11, "varchar_b");
-            stmt.setString(12, "charb");
-            stmt.setInt(13, 4);
-            stmt.setLong(14, 4L);
-            stmt.setBigDecimal(15, new BigDecimal(4.0));
+            date = DateUtil.parseDate("2015-01-02 00:00:00");
+            stmt.setDate(6, date);
+            stmt.setString(7, "varchar_a");
+            stmt.setString(8, "chara");
+            stmt.setInt(9, 3);
+            stmt.setLong(10, 3L);
+            stmt.setBigDecimal(11, new BigDecimal(3.0));
+            stmt.setDate(12, date);
+            stmt.setString(13, "varchar_b");
+            stmt.setString(14, "charb");
+            stmt.setInt(15, 4);
+            stmt.setLong(16, 4L);
+            stmt.setBigDecimal(17, new BigDecimal(4.0));
+            stmt.setDate(18, date);
             stmt.executeUpdate();
             
             stmt.setString(1, "varchar3");
@@ -91,16 +101,20 @@ public class ImmutableIndexIT extends BaseHBaseManagedTimeIT {
             stmt.setInt(3, 3);
             stmt.setLong(4, 3L);
             stmt.setBigDecimal(5, new BigDecimal(3.0));
-            stmt.setString(6, "varchar_a");
-            stmt.setString(7, "chara");
-            stmt.setInt(8, 4);
-            stmt.setLong(9, 4L);
-            stmt.setBigDecimal(10, new BigDecimal(4.0));
-            stmt.setString(11, "varchar_b");
-            stmt.setString(12, "charb");
-            stmt.setInt(13, 5);
-            stmt.setLong(14, 5L);
-            stmt.setBigDecimal(15, new BigDecimal(5.0));
+            date = DateUtil.parseDate("2015-01-03 00:00:00");
+            stmt.setDate(6, date);
+            stmt.setString(7, "varchar_a");
+            stmt.setString(8, "chara");
+            stmt.setInt(9, 4);
+            stmt.setLong(10, 4L);
+            stmt.setBigDecimal(11, new BigDecimal(4.0));
+            stmt.setDate(12, date);
+            stmt.setString(13, "varchar_b");
+            stmt.setString(14, "charb");
+            stmt.setInt(15, 5);
+            stmt.setLong(16, 5L);
+            stmt.setBigDecimal(17, new BigDecimal(5.0));
+            stmt.setDate(18, date);
             stmt.executeUpdate();
             
             conn.commit();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
new file mode 100644
index 0000000..28124b6
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -0,0 +1,866 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static org.apache.phoenix.util.TestUtil.INDEX_DATA_SCHEMA;
+import static org.apache.phoenix.util.TestUtil.INDEX_DATA_TABLE;
+import static org.apache.phoenix.util.TestUtil.MUTABLE_INDEX_DATA_TABLE;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.math.BigDecimal;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.junit.Test;
+
+public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
+
+    private static final int NUM_MILLIS_IN_DAY = 86400000;
+
+    @Test
+    public void testImmutableIndexCreationAndUpdate() throws Exception {
+        helpTestCreateAndUpdate(false, false);
+    }
+
+    @Test
+    public void testImmutableLocalIndexCreationAndUpdate() throws Exception {
+        helpTestCreateAndUpdate(false, true);
+    }
+
+    @Test
+    public void testMutableIndexCreationAndUpdate() throws Exception {
+        helpTestCreateAndUpdate(true, false);
+    }
+
+    @Test
+    public void testMutableLocalIndexCreationAndUpdate() throws Exception {
+        helpTestCreateAndUpdate(true, true);
+    }
+
+    /**
+     * Adds a row to the index data table.
+     *
+     * @param stmt
+     *            prepared statement for the data table upsert
+     * @param i
+     *            row number
+     */
+    private void insertRow(PreparedStatement stmt, int i) throws SQLException {
+        // insert row
+        stmt.setString(1, "varchar" + String.valueOf(i));
+        stmt.setString(2, "char" + String.valueOf(i));
+        stmt.setInt(3, i);
+        stmt.setLong(4, i);
+        stmt.setBigDecimal(5, new BigDecimal(Double.valueOf(i)));
+        Date date = new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i - 1) * NUM_MILLIS_IN_DAY);
+        stmt.setDate(6, date);
+        stmt.setString(7, "a.varchar" + String.valueOf(i));
+        stmt.setString(8, "a.char" + String.valueOf(i));
+        stmt.setInt(9, i);
+        stmt.setLong(10, i);
+        stmt.setBigDecimal(11, new BigDecimal((double)i));
+        stmt.setDate(12, date);
+        stmt.setString(13, "b.varchar" + String.valueOf(i));
+        stmt.setString(14, "b.char" + String.valueOf(i));
+        stmt.setInt(15, i);
+        stmt.setLong(16, i);
+        stmt.setBigDecimal(17, new BigDecimal((double)i));
+        stmt.setDate(18, date);
+        stmt.executeUpdate();
+    }
+
+    private void verifyResult(ResultSet rs, int i) throws SQLException {
+        assertTrue(rs.next());
+        assertEquals("VARCHAR" + String.valueOf(i) + "_" + StringUtils.rightPad("CHAR" + String.valueOf(i), 6, ' ')
+                + "_A.VARCHAR" + String.valueOf(i) + "_" + StringUtils.rightPad("B.CHAR" + String.valueOf(i), 10, ' '),
+                rs.getString(1));
+        assertEquals(i * 4, rs.getInt(2));
+        Date date = new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i) * NUM_MILLIS_IN_DAY);
+        assertEquals(date, rs.getDate(3));
+        assertEquals(date, rs.getDate(4));
+        assertEquals(date, rs.getDate(5));
+        assertEquals("varchar" + String.valueOf(i), rs.getString(6));
+        assertEquals("char" + String.valueOf(i), rs.getString(7));
+        assertEquals(i, rs.getInt(8));
+        assertEquals(i, rs.getLong(9));
+        assertEquals(i, rs.getDouble(10), 0.000001);
+        assertEquals(i, rs.getLong(11));
+        assertEquals(i, rs.getLong(12));
+    }
+
+    protected void helpTestCreateAndUpdate(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            populateDataTable(conn, dataTableName);
+
+            // create an expression index
+            String ddl = "CREATE "
+                    + (localIndex ? "LOCAL" : "")
+                    + " INDEX IDX ON "
+                    + fullDataTableName
+                    + " ((UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2)),"
+                    + " (decimal_pk+int_pk+decimal_col2+int_col1)," + " date_pk+1, date1+1, date2+1 )"
+                    + " INCLUDE (long_col1, long_col2)";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+
+            // run select query with expression in WHERE clause
+            String whereSql = "SELECT long_col1, long_col2 from "
+                    + fullDataTableName
+                    + " WHERE UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2) = ?"
+                    + " AND decimal_pk+int_pk+decimal_col2+int_col1=?"
+                    // since a.date1 and b.date2 are NULLABLE and date is fixed width, these expressions are stored as
+                    // DECIMAL in the index (which is not fixed width)
+                    + " AND date_pk+1=? AND date1+1=? AND date2+1=?";
+            stmt = conn.prepareStatement(whereSql);
+            stmt.setString(1, "VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ");
+            stmt.setInt(2, 4);
+            Date date = DateUtil.parseDate("2015-01-02 00:00:00");
+            stmt.setDate(3, date);
+            stmt.setDate(4, date);
+            stmt.setDate(5, date);
+
+            // verify that the query does a range scan on the index table
+            ResultSet rs = stmt.executeQuery("EXPLAIN " + whereSql);
+            assertEquals(
+                    localIndex ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_INDEX_TEST."
+                            + dataTableName
+                            + " [-32768,'VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ',4,'2015-01-02 00:00:00.000',1,420,156,800,000,1,420,156,800,000]\nCLIENT MERGE SORT"
+                            : "CLIENT PARALLEL 1-WAY RANGE SCAN OVER INDEX_TEST.IDX ['VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ',4,'2015-01-02 00:00:00.000',1,420,156,800,000,1,420,156,800,000]",
+                    QueryUtil.getExplainPlan(rs));
+
+            // verify that the correct results are returned
+            rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(1));
+            assertEquals(1, rs.getInt(2));
+            assertFalse(rs.next());
+
+            // verify all rows in data table are present in index table
+            String indexSelectSql = "SELECT UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2), "
+                    + "decimal_pk+int_pk+decimal_col2+int_col1, "
+                    + "date_pk+1, date1+1, date2+1, "
+                    + "varchar_pk, char_pk, int_pk, long_pk, decimal_pk, "
+                    + "long_col1, long_col2 "
+                    + "from "
+                    + fullDataTableName;
+            rs = conn.createStatement().executeQuery("EXPLAIN " + indexSelectSql);
+            assertEquals(localIndex ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName
+                    + " [-32768]\nCLIENT MERGE SORT" : "CLIENT PARALLEL 1-WAY FULL SCAN OVER INDEX_TEST.IDX",
+                    QueryUtil.getExplainPlan(rs));
+            rs = conn.createStatement().executeQuery(indexSelectSql);
+            verifyResult(rs, 1);
+            verifyResult(rs, 2);
+
+            // Insert two more rows to the index data table
+            String upsert = "UPSERT INTO " + fullDataTableName
+                    + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+            stmt = conn.prepareStatement(upsert);
+            insertRow(stmt, 3);
+            insertRow(stmt, 4);
+            conn.commit();
+
+            rs = conn.createStatement().executeQuery(indexSelectSql);
+            verifyResult(rs, 1);
+            verifyResult(rs, 2);
+            // verify that two rows added after index was created were also added to
+            // the index table
+            verifyResult(rs, 3);
+            verifyResult(rs, 4);
+
+            // update the first row
+            upsert = "UPSERT INTO "
+                    + fullDataTableName
+                    + "(varchar_pk, char_pk, int_pk, long_pk, decimal_pk, date_pk, a.varchar_col1) VALUES(?, ?, ?, ?, ?, ?, ?)";
+
+            stmt = conn.prepareStatement(upsert);
+            stmt.setString(1, "varchar1");
+            stmt.setString(2, "char1");
+            stmt.setInt(3, 1);
+            stmt.setLong(4, 1L);
+            stmt.setBigDecimal(5, new BigDecimal(1.0));
+            stmt.setDate(6, DateUtil.parseDate("2015-01-01 00:00:00"));
+            stmt.setString(7, "a.varchar_updated");
+            stmt.executeUpdate();
+            conn.commit();
+
+            // verify only one row was updated in the data table
+            String selectSql = "UPPER(varchar_pk) || '_' || UPPER(char_pk) || '_' || UPPER(varchar_col1) || '_' || UPPER(char_col2) from "
+                    + fullDataTableName;
+            rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ " + selectSql);
+            assertTrue(rs.next());
+            assertEquals("VARCHAR1_CHAR1 _A.VARCHAR_UPDATED_B.CHAR1   ", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("VARCHAR2_CHAR2 _A.VARCHAR2_B.CHAR2   ", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("VARCHAR3_CHAR3 _A.VARCHAR3_B.CHAR3   ", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("VARCHAR4_CHAR4 _A.VARCHAR4_B.CHAR4   ", rs.getString(1));
+            assertFalse(rs.next());
+
+            // verify that the rows in the index table are also updated
+            rs = conn.createStatement().executeQuery("SELECT " + selectSql);
+            assertTrue(rs.next());
+            // if the data table is immutable, the index table will have one more
+            // row
+            if (!mutable) {
+                assertEquals("VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ", rs.getString(1));
+                assertTrue(rs.next());
+            }
+            assertEquals("VARCHAR1_CHAR1 _A.VARCHAR_UPDATED_" + (mutable ? "B.CHAR1   " : ""), rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("VARCHAR2_CHAR2 _A.VARCHAR2_B.CHAR2   ", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("VARCHAR3_CHAR3 _A.VARCHAR3_B.CHAR3   ", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("VARCHAR4_CHAR4 _A.VARCHAR4_B.CHAR4   ", rs.getString(1));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+
+    private void populateDataTable(Connection conn, String dataTable) throws SQLException {
+        ensureTableCreated(getUrl(), dataTable);
+        String upsert = "UPSERT INTO " + INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTable
+                + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+        PreparedStatement stmt1 = conn.prepareStatement(upsert);
+        // insert two rows
+        insertRow(stmt1, 1);
+        insertRow(stmt1, 2);
+        conn.commit();
+    }
+
+    @Test
+    public void testDeleteIndexedExpressionImmutableIndex() throws Exception {
+        helpTestDeleteIndexedExpression(false, false);
+    }
+
+    @Test
+    public void testDeleteIndexedExpressionImmutableLocalIndex() throws Exception {
+        helpTestDeleteIndexedExpression(false, true);
+    }
+
+    @Test
+    public void testDeleteIndexedExpressionMutableIndex() throws Exception {
+        helpTestDeleteIndexedExpression(true, false);
+    }
+
+    @Test
+    public void testDeleteIndexedExpressionMutableLocalIndex() throws Exception {
+        helpTestDeleteIndexedExpression(true, true);
+    }
+
+    protected void helpTestDeleteIndexedExpression(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        String fullIndexTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + "IDX";
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            ensureTableCreated(getUrl(), dataTableName);
+            populateDataTable(conn, dataTableName);
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
+                    + " (2*long_col2)";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+
+            ResultSet rs;
+            rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullDataTableName);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullIndexTableName);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+
+            conn.setAutoCommit(true);
+            String dml = "DELETE from " + fullDataTableName + " WHERE long_col2 = 2";
+            try {
+                conn.createStatement().execute(dml);
+                if (!mutable) {
+                    fail();
+                }
+            } catch (SQLException e) {
+                if (!mutable) {
+                    assertEquals(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS.getErrorCode(), e.getErrorCode());
+                }
+            }
+
+            if (!mutable) {
+                dml = "DELETE from " + fullDataTableName + " WHERE 2*long_col2 = 4";
+                conn.createStatement().execute(dml);
+            }
+
+            rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullDataTableName);
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(1));
+            rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullIndexTableName);
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(1));
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testDeleteCoveredColImmutableIndex() throws Exception {
+        helpTestDeleteCoveredCol(false, false);
+    }
+
+    @Test
+    public void testDeleteCoveredColImmutableLocalIndex() throws Exception {
+        helpTestDeleteCoveredCol(false, true);
+    }
+
+    @Test
+    public void testDeleteCoveredColMutableIndex() throws Exception {
+        helpTestDeleteCoveredCol(true, false);
+    }
+
+    @Test
+    public void testDeleteCoveredColMutableLocalIndex() throws Exception {
+        helpTestDeleteCoveredCol(true, true);
+    }
+
+    protected void helpTestDeleteCoveredCol(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        String fullIndexTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + "IDX";
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            ensureTableCreated(getUrl(), dataTableName);
+            populateDataTable(conn, dataTableName);
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
+                    + " (long_pk, varchar_pk, 1+long_pk, UPPER(varchar_pk) )" + " INCLUDE (long_col1, long_col2)";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+
+            ResultSet rs;
+            rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullDataTableName);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullIndexTableName);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+
+            String dml = "DELETE from " + fullDataTableName + " WHERE long_col2 = 2";
+            assertEquals(1, conn.createStatement().executeUpdate(dml));
+            conn.commit();
+
+            String query = "SELECT /*+ NO_INDEX */ long_pk, varchar_pk, 1+long_pk, UPPER(varchar_pk) FROM "
+                    + fullDataTableName;
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals(1L, rs.getLong(1));
+            assertEquals("varchar1", rs.getString(2));
+            assertEquals(2L, rs.getLong(3));
+            assertEquals("VARCHAR1", rs.getString(4));
+            assertFalse(rs.next());
+
+            query = "SELECT long_pk, varchar_pk, 1+long_pk, UPPER(varchar_pk) FROM " + fullDataTableName;
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals(1L, rs.getLong(1));
+            assertEquals("varchar1", rs.getString(2));
+            assertEquals(2L, rs.getLong(3));
+            assertEquals("VARCHAR1", rs.getString(4));
+            assertFalse(rs.next());
+
+            query = "SELECT * FROM " + fullIndexTableName;
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+
+            assertEquals(1L, rs.getLong(1));
+            assertEquals("varchar1", rs.getString(2));
+            assertEquals(2L, rs.getLong(3));
+            assertEquals("VARCHAR1", rs.getString(4));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testGroupByCountImmutableIndex() throws Exception {
+        helpTestGroupByCount(false, false);
+    }
+
+    @Test
+    public void testGroupByCountImmutableLocalIndex() throws Exception {
+        helpTestGroupByCount(false, true);
+    }
+
+    @Test
+    public void testGroupByCountMutableIndex() throws Exception {
+        helpTestGroupByCount(true, false);
+    }
+
+    @Test
+    public void testGroupByCountMutableLocalIndex() throws Exception {
+        helpTestGroupByCount(true, true);
+    }
+
+    protected void helpTestGroupByCount(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            populateDataTable(conn, dataTableName);
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
+                    + " (int_col1+int_col2)";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+
+            String groupBySql = "SELECT (int_col1+int_col2), COUNT(*) FROM " + fullDataTableName
+                    + " GROUP BY (int_col1+int_col2)";
+            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + groupBySql);
+            String expectedPlan = "CLIENT PARALLEL 1-WAY "
+                    + (localIndex ? "RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName + " [-32768]"
+                            : "FULL SCAN OVER INDEX_TEST.IDX")
+                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT((A.INT_COL1 + B.INT_COL2))]\nCLIENT MERGE SORT";
+            assertEquals(expectedPlan, QueryUtil.getExplainPlan(rs));
+            rs = conn.createStatement().executeQuery(groupBySql);
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(2));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testSelectDistinctImmutableIndex() throws Exception {
+        helpTestSelectDistinct(false, false);
+    }
+
+    @Test
+    public void testSelectDistinctImmutableIndexLocal() throws Exception {
+        helpTestSelectDistinct(false, true);
+    }
+
+    @Test
+    public void testSelectDistinctMutableIndex() throws Exception {
+        helpTestSelectDistinct(true, false);
+    }
+
+    @Test
+    public void testSelectDistinctMutableLocalIndex() throws Exception {
+        helpTestSelectDistinct(true, true);
+    }
+
+    protected void helpTestSelectDistinct(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            populateDataTable(conn, dataTableName);
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
+                    + " (int_col1+1)";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+            String sql = "SELECT distinct int_col1+1 FROM " + fullDataTableName + " where int_col1+1 > 0";
+            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql);
+            String expectedPlan = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
+                    + (localIndex ? "_LOCAL_IDX_" + fullDataTableName + " [-32768,0] - [-32768,*]"
+                            : "INDEX_TEST.IDX [0] - [*]")
+                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT((A.INT_COL1 + 1))]\nCLIENT MERGE SORT";
+            assertEquals(expectedPlan, QueryUtil.getExplainPlan(rs));
+            rs = conn.createStatement().executeQuery(sql);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(3, rs.getInt(1));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testInClauseWithImmutableIndex() throws Exception {
+        helpTestInClauseWithIndex(false, false);
+    }
+
+    @Test
+    public void testInClauseWithImmutableLocalIndex() throws Exception {
+        helpTestInClauseWithIndex(false, true);
+    }
+
+    @Test
+    public void testInClauseWithMutableIndex() throws Exception {
+        helpTestInClauseWithIndex(true, false);
+    }
+
+    @Test
+    public void testInClauseWithMutableLocalIndex() throws Exception {
+        helpTestInClauseWithIndex(true, true);
+    }
+
+    protected void helpTestInClauseWithIndex(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            populateDataTable(conn, dataTableName);
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
+                    + " (int_col1+1)";
+
+            conn = DriverManager.getConnection(getUrl(), props);
+            conn.setAutoCommit(false);
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+            String sql = "SELECT int_col1+1 FROM " + fullDataTableName + " where int_col1+1 IN (2)";
+            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql);
+            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
+                    + (localIndex ? "_LOCAL_IDX_" + fullDataTableName + " [-32768,2]\n    SERVER FILTER BY FIRST KEY ONLY\nCLIENT MERGE SORT"
+                            : "INDEX_TEST.IDX [2]\n    SERVER FILTER BY FIRST KEY ONLY"), QueryUtil.getExplainPlan(rs));
+            rs = conn.createStatement().executeQuery(sql);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testOrderByWithImmutableIndex() throws Exception {
+        helpTestOrderByWithIndex(false, false);
+    }
+
+    @Test
+    public void testOrderByWithImmutableLocalIndex() throws Exception {
+        helpTestOrderByWithIndex(false, true);
+    }
+
+    @Test
+    public void testOrderByWithMutableIndex() throws Exception {
+        helpTestOrderByWithIndex(true, false);
+    }
+
+    @Test
+    public void testOrderByWithMutableLocalIndex() throws Exception {
+        helpTestOrderByWithIndex(true, true);
+    }
+
+    protected void helpTestOrderByWithIndex(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            populateDataTable(conn, dataTableName);
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
+                    + " (int_col1+1)";
+
+            conn = DriverManager.getConnection(getUrl(), props);
+            conn.setAutoCommit(false);
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+            String sql = "SELECT int_col1+1 FROM " + fullDataTableName + " ORDER BY int_col1+1";
+            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql);
+            assertEquals("CLIENT PARALLEL 1-WAY "
+                    + (localIndex ? "RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName
+                            + " [-32768]\n    SERVER FILTER BY FIRST KEY ONLY\nCLIENT MERGE SORT"
+                            : "FULL SCAN OVER INDEX_TEST.IDX\n    SERVER FILTER BY FIRST KEY ONLY"),
+                    QueryUtil.getExplainPlan(rs));
+            rs = conn.createStatement().executeQuery(sql);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(3, rs.getInt(1));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testSelectColOnlyInDataTableImmutableIndex() throws Exception {
+        helpTestSelectColOnlyInDataTable(false, false);
+    }
+
+    @Test
+    public void testSelectColOnlyInDataTableImmutableLocalIndex() throws Exception {
+        helpTestSelectColOnlyInDataTable(false, true);
+    }
+
+    @Test
+    public void testSelectColOnlyInDataTableMutableIndex() throws Exception {
+        helpTestSelectColOnlyInDataTable(true, false);
+    }
+
+    @Test
+    public void testSelectColOnlyInDataTableMutableLocalIndex() throws Exception {
+        helpTestSelectColOnlyInDataTable(true, true);
+    }
+
+    protected void helpTestSelectColOnlyInDataTable(boolean mutable, boolean localIndex) throws Exception {
+        String dataTableName = mutable ? MUTABLE_INDEX_DATA_TABLE : INDEX_DATA_TABLE;
+        String fullDataTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + dataTableName;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(false);
+            populateDataTable(conn, dataTableName);
+            String ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX IDX ON " + fullDataTableName
+                    + " (int_col1+1)";
+
+            conn = DriverManager.getConnection(getUrl(), props);
+            conn.setAutoCommit(false);
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+            String sql = "SELECT int_col1+1, int_col2 FROM " + fullDataTableName + " WHERE int_col1+1=2";
+            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql);
+            assertEquals("CLIENT PARALLEL 1-WAY "
+                    + (localIndex ? "RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName
+                            + " [-32768,2]\n    SERVER FILTER BY FIRST KEY ONLY\nCLIENT MERGE SORT" : "FULL SCAN OVER "
+                            + fullDataTableName + "\n    SERVER FILTER BY (A.INT_COL1 + 1) = 2"),
+                    QueryUtil.getExplainPlan(rs));
+            rs = conn.createStatement().executeQuery(sql);
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            assertEquals(1, rs.getInt(2));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX IDX ON " + fullDataTableName);
+        } finally {
+            conn.close();
+        }
+    }
+    
+    @Test
+    public void testImmutableIndexWithCaseSensitiveCols() throws Exception {
+        helpTestIndexWithCaseSensitiveCols(false, false);
+    }
+    
+    @Test
+    public void testImmutableLocalIndexWithCaseSensitiveCols() throws Exception {
+        helpTestIndexWithCaseSensitiveCols(false, true);
+    }
+    
+    @Test
+    public void testMutableIndexWithCaseSensitiveCols() throws Exception {
+        helpTestIndexWithCaseSensitiveCols(true, false);
+    }
+    
+    @Test
+    public void testMutableLocalIndexWithCaseSensitiveCols() throws Exception {
+        helpTestIndexWithCaseSensitiveCols(true, true);
+    }
+    
+    protected void helpTestIndexWithCaseSensitiveCols(boolean mutable, boolean localIndex) throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.createStatement().execute("CREATE TABLE cs (k VARCHAR NOT NULL PRIMARY KEY, \"V1\" VARCHAR, \"v2\" VARCHAR) "+ (mutable ? "IMMUTABLE_ROWS=true" : ""));
+            String query = "SELECT * FROM cs";
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertFalse(rs.next());
+            if (localIndex) {
+                conn.createStatement().execute("CREATE LOCAL INDEX ics ON cs (\"v2\" || '_modified') INCLUDE (\"V1\",\"v2\")");
+            } else {
+                conn.createStatement().execute("CREATE INDEX ics ON cs (\"V1\" || '_' || \"v2\") INCLUDE (\"V1\",\"v2\")");
+            }
+            query = "SELECT * FROM ics";
+            rs = conn.createStatement().executeQuery(query);
+            assertFalse(rs.next());
+
+            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO cs VALUES(?,?,?)");
+            stmt.setString(1,"a");
+            stmt.setString(2, "x");
+            stmt.setString(3, "1");
+            stmt.execute();
+            stmt.setString(1,"b");
+            stmt.setString(2, "y");
+            stmt.setString(3, "2");
+            stmt.execute();
+            conn.commit();
+
+            //TODO FIX THIS change this to *
+            query = "SELECT (\"V1\" || '_' || \"v2\"), k, \"V1\", \"v2\"  FROM cs WHERE (\"V1\" || '_' || \"v2\") = 'x_1'";
+            rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+            if(localIndex){
+                assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_CS [-32768,'x_1']\n"
+                           + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
+            } else {
+                assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER ICS ['x_1']", QueryUtil.getExplainPlan(rs));
+            }
+
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("x_1",rs.getString(1));
+            assertEquals("a",rs.getString(2));
+            assertEquals("x",rs.getString(3));
+            assertEquals("1",rs.getString(4));
+            //TODO figure out why this " " is needed
+            assertEquals("x_1",rs.getString("\"('V1' || '_' || 'v2')\""));
+            assertEquals("a",rs.getString("k"));
+            assertEquals("x",rs.getString("V1"));
+            assertEquals("1",rs.getString("v2"));
+            assertFalse(rs.next());
+
+            query = "SELECT \"V1\", \"V1\" as foo1, (\"V1\" || '_' || \"v2\") as foo, (\"V1\" || '_' || \"v2\") as \"Foo1\", (\"V1\" || '_' || \"v2\") FROM cs ORDER BY foo";
+            rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+            if(localIndex){
+                assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_CS [-32768]\nCLIENT MERGE SORT",
+                    QueryUtil.getExplainPlan(rs));
+            } else {
+                assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER ICS", QueryUtil.getExplainPlan(rs));
+            }
+
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("x",rs.getString(1));
+            assertEquals("x",rs.getString("V1"));
+            assertEquals("x",rs.getString(2));
+            assertEquals("x",rs.getString("foo1"));
+            assertEquals("x_1",rs.getString(3));
+            assertEquals("x_1",rs.getString("Foo"));
+            assertEquals("x_1",rs.getString(4));
+            assertEquals("x_1",rs.getString("Foo1"));
+            assertEquals("x_1",rs.getString(5));
+            assertEquals("x_1",rs.getString("\"('V1' || '_' || 'v2')\""));
+            assertTrue(rs.next());
+            assertEquals("y",rs.getString(1));
+            assertEquals("y",rs.getString("V1"));
+            assertEquals("y",rs.getString(2));
+            assertEquals("y",rs.getString("foo1"));
+            assertEquals("y_2",rs.getString(3));
+            assertEquals("y_2",rs.getString("Foo"));
+            assertEquals("y_2",rs.getString(4));
+            assertEquals("y_2",rs.getString("Foo1"));
+            assertEquals("y_2",rs.getString(5));
+            assertEquals("y_2",rs.getString("\"('V1' || '_' || 'v2')\""));
+            assertFalse(rs.next());
+            conn.createStatement().execute("DROP INDEX ICS ON CS");
+        } finally {
+            conn.close();
+        }
+    }
+    
+    @Test
+    public void testImmutableIndexDropIndexedColumn() throws Exception {
+        helpTestDropIndexedColumn(false, false);
+    }
+    
+    @Test
+    public void testImmutableLocalIndexDropIndexedColumn() throws Exception {
+        helpTestDropIndexedColumn(false, true);
+    }
+    
+    @Test
+    public void testMutableIndexDropIndexedColumn() throws Exception {
+        helpTestDropIndexedColumn(true, false);
+    }
+    
+    @Test
+    public void testMutableLocalIndexDropIndexedColumn() throws Exception {
+        helpTestDropIndexedColumn(true, true);
+    }
+    
+    public void helpTestDropIndexedColumn(boolean mutable, boolean local) throws Exception {
+        String query;
+        ResultSet rs;
+        PreparedStatement stmt;
+
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        // make sure that the tables are empty, but reachable
+        conn.createStatement().execute(
+          "CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+        query = "SELECT * FROM t" ;
+        rs = conn.createStatement().executeQuery(query);
+        assertFalse(rs.next());
+        String indexName = "it_" + (mutable ? "m" : "im") + "_" + (local ? "l" : "h");
+        conn.createStatement().execute("CREATE " + ( local ? "LOCAL" : "") + " INDEX " + indexName + " ON t (v1 || '_' || v2)");
+
+        query = "SELECT * FROM t";
+        rs = conn.createStatement().executeQuery(query);
+        assertFalse(rs.next());
+
+        // load some data into the table
+        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?,?)");
+        stmt.setString(1, "a");
+        stmt.setString(2, "x");
+        stmt.setString(3, "1");
+        stmt.execute();
+        conn.commit();
+
+        assertIndexExists(conn,true);
+        conn.createStatement().execute("ALTER TABLE t DROP COLUMN v1");
+        assertIndexExists(conn,false);
+
+        query = "SELECT * FROM t";
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals("a",rs.getString(1));
+        assertEquals("1",rs.getString(2));
+        assertFalse(rs.next());
+
+        // load some data into the table
+        stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?)");
+        stmt.setString(1, "a");
+        stmt.setString(2, "2");
+        stmt.execute();
+        conn.commit();
+
+        query = "SELECT * FROM t";
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals("a",rs.getString(1));
+        assertEquals("2",rs.getString(2));
+        assertFalse(rs.next());
+    }
+    
+    private static void assertIndexExists(Connection conn, boolean exists) throws SQLException {
+        ResultSet rs = conn.getMetaData().getIndexInfo(null, null, "T", false, false);
+        assertEquals(exists, rs.next());
+    }
+
+}
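
Taken together, the tests above exercise the new expression-index DDL end to end: create an index keyed on an expression, verify via EXPLAIN that the optimizer rewrites the query against the index, and check the results. As a minimal standalone sketch of the same flow (the JDBC URL and the table/index names here are hypothetical, not taken from the test suite):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class FunctionalIndexExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Plain data table
                conn.createStatement().execute(
                    "CREATE TABLE demo (k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
                // Index keyed on an expression rather than a bare column
                conn.createStatement().execute(
                    "CREATE INDEX demo_idx ON demo (v1 || '_' || v2)");
                conn.createStatement().execute(
                    "UPSERT INTO demo VALUES ('a', 'x', '1')");
                conn.commit();
                // A filter on the same expression can now be served from demo_idx
                ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT k FROM demo WHERE (v1 || '_' || v2) = 'x_1'");
                while (rs.next()) {
                    System.out.println(rs.getString(1)); // prints: a
                }
            }
        }
    }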

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
index 88e7340..d6ced3c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
@@ -100,7 +100,7 @@ public class IndexMetadataIT extends BaseHBaseManagedTimeIT {
         String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
         conn.createStatement().executeQuery("SELECT count(*) FROM " + fullTableName).next(); // client side cache will update
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-        pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), fullTableName)).getIndexMaintainers(ptr);
+        pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), fullTableName)).getIndexMaintainers(ptr, pconn);
         assertTrue(ptr.getLength() > 0);
     }
     
@@ -109,7 +109,7 @@ public class IndexMetadataIT extends BaseHBaseManagedTimeIT {
         String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
         conn.createStatement().executeQuery("SELECT count(*) FROM " + fullTableName).next(); // client side cache will update
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-        pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), fullTableName)).getIndexMaintainers(ptr);
+        pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), fullTableName)).getIndexMaintainers(ptr, pconn);
         assertTrue(ptr.getLength() == 0);
     }
     
@@ -135,8 +135,9 @@ public class IndexMetadataIT extends BaseHBaseManagedTimeIT {
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX", 5, ":CHAR_PK", Order.ASC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX", 6, ":LONG_PK", Order.DESC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX", 7, ":DECIMAL_PK", Order.ASC);
-            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX", 8, "A:INT_COL1", null);
-            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX", 9, "B:INT_COL2", null);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX", 8, ":DATE_PK", Order.ASC);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX", 9, "A:INT_COL1", null);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX", 10, "B:INT_COL2", null);
             assertFalse(rs.next());
             
             rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(INDEX_DATA_SCHEMA), StringUtil.escapeLike("IDX"), new String[] {PTableType.INDEX.getValue().getString() });
@@ -245,8 +246,9 @@ public class IndexMetadataIT extends BaseHBaseManagedTimeIT {
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX1", 5, ":CHAR_PK", Order.ASC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX1", 6, ":LONG_PK", Order.DESC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX1", 7, ":DECIMAL_PK", Order.ASC);
-            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX1", 8, "A:INT_COL1", null);
-            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX1", 9, "B:INT_COL2", null);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX1", 8, ":DATE_PK", Order.ASC);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX1", 9, "A:INT_COL1", null);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX1", 10, "B:INT_COL2", null);
 
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX2", 1, "A:VARCHAR_COL1", Order.ASC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX2", 2, "B:VARCHAR_COL2", Order.ASC);
@@ -255,7 +257,8 @@ public class IndexMetadataIT extends BaseHBaseManagedTimeIT {
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX2", 5, ":CHAR_PK", Order.ASC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX2", 6, ":LONG_PK", Order.DESC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX2", 7, ":DECIMAL_PK", Order.ASC);
-            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX2", 8, "B:INT_COL2", null);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX2", 8, ":DATE_PK", Order.ASC);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, MUTABLE_INDEX_DATA_TABLE, "IDX2", 9, "B:INT_COL2", null);
             assertFalse(rs.next());
             
             // Create another table in the same schema
@@ -307,7 +310,8 @@ public class IndexMetadataIT extends BaseHBaseManagedTimeIT {
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, INDEX_DATA_TABLE, "IDX", 6, ":INT_PK", Order.ASC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, INDEX_DATA_TABLE, "IDX", 7, ":LONG_PK", Order.DESC);
             assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, INDEX_DATA_TABLE, "IDX", 8, ":DECIMAL_PK", Order.ASC);
-            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, INDEX_DATA_TABLE, "IDX", 9, "A:INT_COL1", null);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, INDEX_DATA_TABLE, "IDX", 9, ":DATE_PK", Order.ASC);
+            assertIndexInfoMetadata(rs, INDEX_DATA_SCHEMA, INDEX_DATA_TABLE, "IDX", 10, "A:INT_COL1", null);
             assertFalse(rs.next());
             
             rs = IndexTestUtil.readDataTableIndexRow(conn, INDEX_DATA_SCHEMA, INDEX_DATA_TABLE, "IDX");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index bcf26be..fad5fb3 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -393,11 +393,11 @@ create_view_node returns [CreateTableStatement ret]
 // Parse a create index statement.
 create_index_node returns [CreateIndexStatement ret]
     :   CREATE l=LOCAL? INDEX (IF NOT ex=EXISTS)? i=index_name ON t=from_table_name
-        (LPAREN pk=index_pk_constraint RPAREN)
+        (LPAREN ik=ik_constraint RPAREN)
         (INCLUDE (LPAREN icrefs=column_names RPAREN))?
         (p=fam_properties)?
         (SPLIT ON v=value_expression_list)?
-        {ret = factory.createIndex(i, factory.namedTable(null,t), pk, icrefs, v, p, ex!=null, l==null ? IndexType.getDefault() : IndexType.LOCAL, getBindCount()); }
+        {ret = factory.createIndex(i, factory.namedTable(null,t), ik, icrefs, v, p, ex!=null, l==null ? IndexType.getDefault() : IndexType.LOCAL, getBindCount()); }
     ;
 
 // Parse a create sequence statement.
@@ -436,17 +436,17 @@ col_name_with_sort_order returns [Pair<ColumnName, SortOrder> ret]
     :   f=identifier (order=ASC|order=DESC)? {$ret = Pair.newPair(factory.columnName(f), order == null ? SortOrder.getDefault() : SortOrder.fromDDLValue(order.getText()));}
 ;
 
-index_pk_constraint returns [PrimaryKeyConstraint ret]
-    :   cols = col_def_name_with_sort_order_list {$ret = factory.primaryKey(null, cols); }
-    ;
+ik_constraint returns [IndexKeyConstraint ret]
+    :   x = expression_with_sort_order_list {$ret = factory.indexKey(x); }
+;
 
-col_def_name_with_sort_order_list returns [List<Pair<ColumnName, SortOrder>> ret]
-@init{ret = new ArrayList<Pair<ColumnName, SortOrder>>(); }
-    :   p=col_def_name_with_sort_order {$ret.add(p);}  (COMMA p = col_def_name_with_sort_order {$ret.add(p);} )*
+expression_with_sort_order_list returns [List<Pair<ParseNode, SortOrder>> ret]
+@init{ret = new ArrayList<Pair<ParseNode, SortOrder>>(); }
+    :   p=expression_with_sort_order {$ret.add(p);}  (COMMA p = expression_with_sort_order {$ret.add(p);} )*
 ;
 
-col_def_name_with_sort_order returns [Pair<ColumnName, SortOrder> ret]
-    :   c=column_name (order=ASC|order=DESC)? {$ret = Pair.newPair(c, order == null ? SortOrder.getDefault() : SortOrder.fromDDLValue(order.getText()));}
+expression_with_sort_order returns [Pair<ParseNode, SortOrder> ret]
+    :   (x=expression) (order=ASC|order=DESC)? {$ret = Pair.newPair(x, order == null ? SortOrder.getDefault() : SortOrder.fromDDLValue(order.getText()));}
 ;
 
 fam_properties returns [ListMultimap<String,Pair<String,Object>> ret]
@@ -780,7 +780,9 @@ term returns [ParseNode ret]
     |   field=identifier LPAREN l=zero_or_more_expressions RPAREN wg=(WITHIN GROUP LPAREN ORDER BY l2=one_or_more_expressions (a=ASC | DESC) RPAREN)?
         {
             FunctionParseNode f = wg==null ? factory.function(field, l) : factory.function(field,l,l2,a!=null);
-            contextStack.peek().setAggregate(f.isAggregate());
+            if (!contextStack.isEmpty()) {
+            	contextStack.peek().setAggregate(f.isAggregate());
+            }
             $ret = f;
         } 
     |   field=identifier LPAREN t=ASTERISK RPAREN 
@@ -789,13 +791,17 @@ term returns [ParseNode ret]
                 throwRecognitionException(t); 
             }
             FunctionParseNode f = factory.function(field, LiteralParseNode.STAR);
-            contextStack.peek().setAggregate(f.isAggregate()); 
+            if (!contextStack.isEmpty()) {
+            	contextStack.peek().setAggregate(f.isAggregate());
+            }
             $ret = f;
         } 
     |   field=identifier LPAREN t=DISTINCT l=zero_or_more_expressions RPAREN 
         {
             FunctionParseNode f = factory.functionDistinct(field, l);
-            contextStack.peek().setAggregate(f.isAggregate());
+            if (!contextStack.isEmpty()) {
+            	contextStack.peek().setAggregate(f.isAggregate());
+            }
             $ret = f;
         }
     |   e=case_statement { $ret = e; }
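
The grammar change above is the heart of PHOENIX-514: the old index_pk_constraint rule admitted only column names with an optional sort order, while the new ik_constraint rule admits a full expression per key part (the added contextStack.isEmpty() guards presumably let function expressions be parsed outside a statement context, as happens when an index expression is compiled on its own). A sketch of DDL shapes the reworked rule accepts, based on the forms used in the tests above (table and column names are hypothetical):

    // Each index key part may now be an arbitrary expression with its own ASC/DESC.
    public class IkConstraintExamples {
        static final String[] NOW_PARSEABLE = {
            "CREATE INDEX i1 ON t (int_col1 + 1)",                // arithmetic expression
            "CREATE INDEX i2 ON t (v1 || '_' || v2 DESC, k ASC)", // concatenation, per-key sort order
            "CREATE LOCAL INDEX i3 ON t (UPPER(v1)) INCLUDE (v2)" // function call on a local index
        };
    }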

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index f213d2d..718f820 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -57,8 +57,8 @@ import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
@@ -144,7 +144,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                         new HashMap<ImmutableBytesWritable, IndexMaintainer>();
                 for (PTable index : indexes) {
                     if (index.getIndexType() == IndexType.LOCAL) {
-                        IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable);
+                        IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, conn);
                         indexMaintainers.put(new ImmutableBytesWritable(MetaDataUtil
                                 .getViewIndexIdDataType().toBytes(index.getViewIndexId())),
                             indexMaintainer);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 0a2ee38..322d24a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -65,7 +65,6 @@ import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.MetaDataEntityNotFoundException;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PRow;
 import org.apache.phoenix.schema.PTable;
@@ -75,6 +74,7 @@ import org.apache.phoenix.schema.ReadOnlyTableException;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.ScanUtil;
 
@@ -483,7 +483,7 @@ public class DeleteCompiler {
                     public MutationState execute() throws SQLException {
                         // TODO: share this block of code with UPSERT SELECT
                         ImmutableBytesWritable ptr = context.getTempPtr();
-                        tableRef.getTable().getIndexMaintainers(ptr);
+                        tableRef.getTable().getIndexMaintainers(ptr, context.getConnection());
                         ServerCache cache = null;
                         try {
                             if (ptr.getLength() > 0) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 95e145c..97818e6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -135,6 +135,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
     protected final StatementContext context;
     protected final GroupBy groupBy;
     private int nodeCount;
+    private int totalNodeCount;
     private final boolean resolveViewConstants;
 
     public ExpressionCompiler(StatementContext context) {
@@ -166,6 +167,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
     public void reset() {
         this.isAggregate = false;
         this.nodeCount = 0;
+        this.totalNodeCount = 0;
     }
 
     @Override
@@ -420,6 +422,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
     @Override
     public void addElement(List<Expression> l, Expression element) {
         nodeCount--;
+        totalNodeCount++;
         l.add(element);
     }
 
@@ -553,7 +556,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
                 expr =  CastParseNode.convertToRoundExpressionIfNeeded(fromDataType, targetDataType, children);
             }
         }
-        return CoerceExpression.create(expr, targetDataType, SortOrder.getDefault(), expr.getMaxLength());  
+        return wrapGroupByExpression(CoerceExpression.create(expr, targetDataType, SortOrder.getDefault(), expr.getMaxLength()));  
     }
     
    @Override
@@ -1254,4 +1257,8 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
         Object result = context.getSubqueryResult(node.getSelectNode());
         return LiteralExpression.newConstant(result);
     }
+    
+    public int getTotalNodeCount() {
+        return totalNodeCount;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 0163082..1a605b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -184,7 +184,7 @@ public class FromCompiler {
             Expression sourceExpression = projector.getColumnProjector(column.getPosition()).getExpression();
             PColumnImpl projectedColumn = new PColumnImpl(column.getName(), column.getFamilyName(),
                     sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
-                    column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced());
+                    column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr());
             projectedColumns.add(projectedColumn);
             sourceExpressions.add(sourceExpression);
         }
@@ -391,7 +391,7 @@ public class FromCompiler {
                         familyName = PNameFactory.newName(family);
                     }
                     allcolumns.add(new PColumnImpl(name, familyName, dynColumn.getDataType(), dynColumn.getMaxLength(),
-                            dynColumn.getScale(), dynColumn.isNull(), position, dynColumn.getSortOrder(), dynColumn.getArraySize(), null, false));
+                            dynColumn.getScale(), dynColumn.isNull(), position, dynColumn.getSortOrder(), dynColumn.getArraySize(), null, false, dynColumn.getExpression()));
                     position++;
                 }
                 theTable = PTableImpl.makePTable(theTable, allcolumns);
@@ -469,7 +469,7 @@ public class FromCompiler {
                 }
                 PColumnImpl column = new PColumnImpl(PNameFactory.newName(alias),
                         PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY),
-                        null, 0, 0, true, position++, SortOrder.ASC, null, null, false);
+                        null, 0, 0, true, position++, SortOrder.ASC, null, null, false, null);
                 columns.add(column);
             }
             PTable t = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/HavingCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/HavingCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/HavingCompiler.java
index 0cd6ecf..224a9b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/HavingCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/HavingCompiler.java
@@ -35,8 +35,8 @@ import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.parse.SelectStatementRewriter;
 import org.apache.phoenix.schema.ColumnRef;
-import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.types.PBoolean;
 
 
 public class HavingCompiler {
@@ -171,7 +171,7 @@ public class HavingCompiler {
         @Override
         public Void visit(ColumnParseNode node) throws SQLException {
             ColumnRef ref = context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), node.getName());
-            boolean isAggregateColumn = groupBy.getExpressions().indexOf(ref.newColumnExpression()) >= 0;
+            boolean isAggregateColumn = groupBy.getExpressions().indexOf(ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive())) >= 0;
             if (hasOnlyAggregateColumns == null) {
                 hasOnlyAggregateColumns = isAggregateColumn;
             } else {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java
new file mode 100644
index 0000000..b4a4168
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.SQLException;
+
+import org.apache.phoenix.expression.ColumnExpression;
+import org.apache.phoenix.parse.ColumnParseNode;
+import org.apache.phoenix.schema.ColumnRef;
+
+/**
+ * Used to check if the expression being compiled is a {@link ColumnExpression}
+ */
+public class IndexExpressionCompiler extends ExpressionCompiler {
+
+    // set when the whole compiled expression resolves to a bare column reference; null otherwise
+    private ColumnRef columnRef;
+
+    public IndexExpressionCompiler(StatementContext context) {
+        super(context);
+        this.columnRef = null;
+    }
+
+    @Override
+    public void reset() {
+        super.reset();
+        this.columnRef = null;
+    }
+
+    @Override
+    protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException {
+        ColumnRef columnRef = super.resolveColumn(node);
+        if (isTopLevel()) {
+            this.columnRef = columnRef;
+        }
+        return columnRef;
+    }
+
+    /**
+     * @return the ColumnRef if the expression being compiled is a plain column reference, otherwise null
+     */
+    public ColumnRef getColumnRef() {
+        return columnRef;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c340f5a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 445edd8..c29ea23 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -788,7 +788,7 @@ public class JoinCompiler {
             PName name = PNameFactory.newName(aliasedName);
             PColumnImpl column = new PColumnImpl(name, familyName, sourceColumn.getDataType(),
                     sourceColumn.getMaxLength(), sourceColumn.getScale(), sourceColumn.isNullable(),
-                    position, sourceColumn.getSortOrder(), sourceColumn.getArraySize(), sourceColumn.getViewConstant(), sourceColumn.isViewReferenced());
+                    position, sourceColumn.getSortOrder(), sourceColumn.getArraySize(), sourceColumn.getViewConstant(), sourceColumn.isViewReferenced(), sourceColumn.getExpressionStr());
             Expression sourceExpression = isLocalIndexColumnRef ?
                       NODE_FACTORY.column(TableName.create(schemaName, tableName), "\"" + colName + "\"", null).accept(new ExpressionCompiler(context))
                     : new ColumnRef(tableRef, sourceColumn.getPosition()).newColumnExpression();
@@ -807,7 +807,7 @@ public class JoinCompiler {
                 Expression sourceExpression = rowProjector.getColumnProjector(column.getPosition()).getExpression();
                 PColumnImpl projectedColumn = new PColumnImpl(PNameFactory.newName(colName), PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY),
                         sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
-                        column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced());
+                        column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr());
                 projectedColumns.add(projectedColumn);
                 sourceExpressions.add(sourceExpression);
             }
@@ -1367,7 +1367,7 @@ public class JoinCompiler {
                     } else {
                         PColumnImpl column = new PColumnImpl(c.getName(), c.getFamilyName(), c.getDataType(),
                                 c.getMaxLength(), c.getScale(), true, c.getPosition(),
-                                c.getSortOrder(), c.getArraySize(), c.getViewConstant(), c.isViewReferenced());
+                                c.getSortOrder(), c.getArraySize(), c.getViewConstant(), c.isViewReferenced(), c.getExpressionStr());
                         merged.add(column);
                     }
                 }
@@ -1378,7 +1378,7 @@ public class JoinCompiler {
                     PColumnImpl column = new PColumnImpl(c.getName(),
                             PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY), c.getDataType(),
                             c.getMaxLength(), c.getScale(), type == JoinType.Inner ? c.isNullable() : true, position++,
-                            c.getSortOrder(), c.getArraySize(), c.getViewConstant(), c.isViewReferenced());
+                            c.getSortOrder(), c.getArraySize(), c.getViewConstant(), c.isViewReferenced(), c.getExpressionStr());
                     merged.add(column);
                 }
             }


[47/50] [abbrv] phoenix git commit: PHOENIX-1690 IndexOutOfBoundsException during SkipScanFilter intersect

Posted by ma...@apache.org.
PHOENIX-1690 IndexOutOfBoundsException during SkipScanFilter intersect


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/49f06b33
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/49f06b33
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/49f06b33

Branch: refs/heads/calcite
Commit: 49f06b331c2b364f176d77548d04a4eb9d15c5c9
Parents: 3f82975
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Mar 3 12:07:56 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Mar 3 12:07:56 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/filter/SkipScanFilter.java   |  31 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |   9 +
 .../phoenix/filter/SkipScanBigFilterTest.java   | 717 +++++++++++++++++++
 .../filter/SkipScanFilterIntersectTest.java     |  69 +-
 4 files changed, 821 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/49f06b33/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index 0aafdbb..1923856 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -195,7 +195,7 @@ public class SkipScanFilter extends FilterBase implements Writable {
         Arrays.fill(position, 0);
     }
     
-    private boolean intersect(byte[] lowerInclusiveKey, byte[] upperExclusiveKey, List<List<KeyRange>> newSlots) {
+    private boolean intersect(final byte[] lowerInclusiveKey, final byte[] upperExclusiveKey, List<List<KeyRange>> newSlots) {
         resetState();
         boolean lowerUnbound = (lowerInclusiveKey.length == 0);
         int startPos = 0;
@@ -262,6 +262,9 @@ public class SkipScanFilter extends FilterBase implements Writable {
         }
         int[] lowerPosition = Arrays.copyOf(position, position.length);
         // Navigate to the upperExclusiveKey, but not past it
+        // TODO: We're including everything between the lowerPosition and end position, which is
+        // more than we need. We can optimize this by tracking whether each range in each slot position
+        // intersects.
         ReturnCode endCode = navigate(upperExclusiveKey, 0, upperExclusiveKey.length, Terminate.AT);
         if (endCode == ReturnCode.INCLUDE) {
             setStartKey();
@@ -286,6 +289,11 @@ public class SkipScanFilter extends FilterBase implements Writable {
                 position[i] = slots.get(i).size() - 1;
             }
         }
+        int prevRowKeyPos = -1;
+        ImmutableBytesWritable lowerPtr = new ImmutableBytesWritable();
+        ImmutableBytesWritable upperPtr = new ImmutableBytesWritable();
+        schema.iterator(lowerInclusiveKey, lowerPtr);
+        schema.iterator(upperExclusiveKey, upperPtr);
         // Copy inclusive all positions 
         for (int i = 0; i <= lastSlot; i++) {
             List<KeyRange> newRanges = slots.get(i).subList(lowerPosition[i], Math.min(position[i] + 1, slots.get(i).size()));
@@ -295,12 +303,33 @@ public class SkipScanFilter extends FilterBase implements Writable {
             if (newSlots != null) {
                 newSlots.add(newRanges);
             }
+            // Must include all "less-significant" slot values if:
+            // 1) a more-significant slot was incremented
             if (position[i] > lowerPosition[i]) {
                 if (newSlots != null) {
                     newSlots.addAll(slots.subList(i+1, slots.size()));
                 }
                 break;
             }
+            // 2) we're at a slot containing a range and the values differ between the lower and upper range,
+            //    since less-significant slots may be lower after traversal than where they started.
+            if (!slots.get(i).get(position[i]).isSingleKey()) {
+                int rowKeyPos = ScanUtil.getRowKeyPosition(slotSpan, i);
+                // Position lowerPtr/upperPtr within lowerInclusiveKey/upperExclusiveKey at value for slot i
+                // The reposition method does this incrementally, starting from prevRowKeyPos = -1 on the first pass.
+                schema.reposition(lowerPtr, prevRowKeyPos, rowKeyPos, 0, lowerInclusiveKey.length, slotSpan[i]);
+                schema.reposition(upperPtr, prevRowKeyPos, rowKeyPos, 0, upperExclusiveKey.length, slotSpan[i]);
+                // If we have a range and the values differ, we must include all slots that are less significant.
+                // For example, with slots [A-D][1,23] the lower/upper keys could be B5/C2: the C is still in range,
+                // but the next slot value of the upper key (2) is less than that of the lower key (5).
+                if (lowerPtr.compareTo(upperPtr) != 0) {
+                    if (newSlots != null) {
+                        newSlots.addAll(slots.subList(i+1, slots.size()));
+                    }
+                    break;
+                }
+                prevRowKeyPos = rowKeyPos;
+            }
         }
         return true;
     }
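
The second condition is the subtle one: when a slot holds a range rather than a single key, the bytes of the lower and upper boundary keys at that slot can differ, so positions computed for the less-significant slots are not comparable between the two traversals and every remaining range must be kept. A self-contained sketch of the effect (plain Java, not the Phoenix API; the slot values here are illustrative):

    // Slot 0 is a range covering A-D (enumerated as points for the sketch);
    // slot 1 holds the values {1, 5}. Both B5 and C1 fall inside the region
    // boundary window [B5, C2), so trimming slot 1 by start/end position alone
    // would wrongly drop one of its values.
    public class IntersectSketch {
        public static void main(String[] args) {
            String[] slot0 = {"A", "B", "C", "D"};
            String[] slot1 = {"1", "5"};
            for (String s0 : slot0) {
                for (String s1 : slot1) {
                    String key = s0 + s1;
                    if (key.compareTo("B5") >= 0 && key.compareTo("C2") < 0) {
                        System.out.println(key); // prints B5, then C1
                    }
                }
            }
        }
    }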

http://git-wip-us.apache.org/repos/asf/phoenix/blob/49f06b33/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index e14565d..658ff23 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -236,6 +236,15 @@ public class PTableImpl implements PTable {
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats());
     }
 
+    public static PTableImpl makePTable(PTable table, PTableStats stats) throws SQLException {
+        return new PTableImpl(
+                table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), table.getTimeStamp(),
+                table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), getColumnsToClone(table),
+                table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
+                table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
+                table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), stats);
+    }
+
     public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state, long timeStamp, long sequenceNumber,
             PName pkName, Integer bucketNum, List<PColumn> columns, PName dataSchemaName, PName dataTableName, List<PTable> indexes,
             boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant,


[06/50] [abbrv] phoenix git commit: PHOENIX-1613 Allow old commons-collections version

Posted by ma...@apache.org.
PHOENIX-1613 Allow old commons-collections version

Handle an IllegalAccessError while initializing tracing, in order
to allow Phoenix to be used (without tracing) together with
SquirrelSQL. The IllegalAccessError occurs because SquirrelSQL
uses an old (2.x) version of commons-collections, while tracing
depends on a 3.x version.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3f48938d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3f48938d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3f48938d

Branch: refs/heads/calcite
Commit: 3f48938d5d5a67d59129eb90295894cc76e58508
Parents: 03a5d7e
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Sun Feb 1 07:10:24 2015 +0100
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Sun Feb 1 07:55:12 2015 +0100

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/trace/util/Tracing.java   | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f48938d/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index 7e1df72..a46d4e8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -254,6 +254,12 @@ public class Tracing {
         } catch (RuntimeException e) {
             LOG.warn("Tracing will outputs will not be written to any metrics sink! No "
                     + "TraceMetricsSink found on the classpath", e);
+        } catch (IllegalAccessError e) {
+            // This is an issue when we have a class incompatibility error, such as when running
+            // within SquirrelSQL which uses an older incompatible version of commons-collections.
+            // Seeing as this only results in disabling tracing, we swallow this exception and just
+            // continue on without tracing.
+            LOG.warn("Class incompatibility while initializing metrics, metrics will be disabled", e);
         }
         initialized = true;
     }


[30/50] [abbrv] phoenix git commit: PHOENIX-1656 - Add release dir to gitignore

Posted by ma...@apache.org.
PHOENIX-1656 - Add release dir to gitignore


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f925a403
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f925a403
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f925a403

Branch: refs/heads/calcite
Commit: f925a4034b7b78f8e9d15539cdd7b0cc8a54e59b
Parents: fc299d5
Author: Mujtaba <mu...@apache.org>
Authored: Thu Feb 12 13:00:51 2015 -0800
Committer: Mujtaba <mu...@apache.org>
Committed: Thu Feb 12 13:00:51 2015 -0800

----------------------------------------------------------------------
 .gitignore | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f925a403/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 21a2c92..5f0dab5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,4 @@
 
 #maven stuffs
 target/
+release/


[26/50] [abbrv] phoenix git commit: PHOENIX-1646 Views and functional index expressions may lose information when stringified

Posted by ma...@apache.org.
PHOENIX-1646 Views and functional index expressions may lose information when stringified


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/acb87990
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/acb87990
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/acb87990

Branch: refs/heads/calcite
Commit: acb87990d81bfc60bba0f511957c18e1a12b4da3
Parents: 2730e87
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Feb 10 01:58:29 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Feb 10 01:58:29 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java  | 4 ++--
 .../it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java   | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/acb87990/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
index 932c68b..e4ba2c6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
@@ -322,8 +322,8 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "    SERVER FILTER BY V1 = 'a'\n" +
                     "    SKIP-SCAN-JOIN TABLE 0\n" +
                     "        CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER _IDX_T \\[-32768,1\\] - \\[-32768,2\\]\n" +
-                    "            SERVER FILTER BY FIRST KEY ONLY AND \"K2\" IN (3,4)\n" +
-                    "    DYNAMIC SERVER FILTER BY (\"T_ID\", \"K1\", \"K2\") IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
+                    "            SERVER FILTER BY FIRST KEY ONLY AND \"K2\" IN \\(3,4\\)\n" +
+                    "    DYNAMIC SERVER FILTER BY \\(\"T_ID\", \"K1\", \"K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
             assertTrue("Expected:\n" + expected + "\ndid not match\n" + actual, Pattern.matches(expected,actual));
             
             rs = conn1.createStatement().executeQuery(query);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/acb87990/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 9e66bbf..4080730 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -471,7 +471,7 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL " + numRegions + "-WAY RANGE SCAN OVER "
                         + MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME)+" [-32768,*] - [-32768,'z']\n"
                         + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                        + "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"V1\", \"T_ID\", \"K3\"]\n" + "CLIENT MERGE SORT",
+                        + "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"V1\", \"T_ID\", K3]\n" + "CLIENT MERGE SORT",
                 QueryUtil.getExplainPlan(rs));
             
             rs = conn1.createStatement().executeQuery(query);


[49/50] [abbrv] phoenix git commit: Merge branch 'master' into calcite

Posted by ma...@apache.org.
Merge branch 'master' into calcite


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b9d24a94
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b9d24a94
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b9d24a94

Branch: refs/heads/calcite
Commit: b9d24a94986360525bbe70b93308434877ed078d
Parents: 679571b 9a546b9
Author: maryannxue <we...@intel.com>
Authored: Wed Mar 4 12:33:38 2015 -0500
Committer: maryannxue <we...@intel.com>
Committed: Wed Mar 4 12:33:38 2015 -0500

----------------------------------------------------------------------
 .gitignore                                      |    1 +
 bin/phoenix_sandbox.py                          |   59 +
 bin/phoenix_utils.py                            |    2 +-
 bin/sandbox-log4j.properties                    |   45 +
 phoenix-assembly/pom.xml                        |    4 +-
 phoenix-assembly/src/build/client.xml           |    1 -
 .../src/build/components-major-client.xml       |    3 +-
 .../src/build/components-minimal.xml            |    1 +
 .../components/all-common-dependencies.xml      |    1 -
 .../src/build/components/all-common-jars.xml    |    3 +-
 .../src/build/package-to-tar-all.xml            |   14 +
 .../src/build/server-without-antlr.xml          |    2 +-
 phoenix-assembly/src/build/server.xml           |    1 -
 phoenix-core/pom.xml                            |   19 +
 ...ReplayWithIndexWritesAndCompressedWALIT.java |    2 +-
 .../src/it/java/org/apache/phoenix/Sandbox.java |   67 +
 .../phoenix/end2end/AggregateQueryIT.java       |  238 +
 .../apache/phoenix/end2end/AlterTableIT.java    | 1374 +++-
 .../phoenix/end2end/ArithmeticQueryIT.java      |  100 +
 .../org/apache/phoenix/end2end/ArrayIT.java     |    2 +-
 .../org/apache/phoenix/end2end/BaseQueryIT.java |   17 +-
 .../end2end/BaseTenantSpecificViewIndexIT.java  |   16 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java  |   15 +-
 .../phoenix/end2end/CSVCommonsLoaderIT.java     |   47 +-
 .../end2end/ColumnProjectionOptimizationIT.java |   24 +-
 .../apache/phoenix/end2end/CreateTableIT.java   |   47 +-
 .../phoenix/end2end/DecodeFunctionIT.java       |    7 +-
 .../apache/phoenix/end2end/DerivedTableIT.java  |   15 +-
 .../phoenix/end2end/DisableLocalIndexIT.java    |   99 +
 .../apache/phoenix/end2end/DynamicFamilyIT.java |   23 +-
 .../org/apache/phoenix/end2end/GroupByIT.java   |   15 +
 .../org/apache/phoenix/end2end/HashJoinIT.java  |  596 +-
 .../phoenix/end2end/HashJoinLocalIndexIT.java   |  171 +
 .../org/apache/phoenix/end2end/InListIT.java    |   23 +-
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   |    2 +-
 .../end2end/MinMaxAggregateFunctionIT.java      |   86 +
 .../phoenix/end2end/ParallelIteratorsIT.java    |    4 +-
 .../apache/phoenix/end2end/PercentileIT.java    |    7 +-
 .../phoenix/end2end/PhoenixRuntimeIT.java       |  138 +
 .../phoenix/end2end/PointInTimeQueryIT.java     |  179 +
 .../phoenix/end2end/ProductMetricsIT.java       |   15 +-
 .../end2end/QueryDatabaseMetaDataIT.java        |   59 +-
 .../org/apache/phoenix/end2end/QueryIT.java     |  343 +-
 .../org/apache/phoenix/end2end/QueryMoreIT.java |   14 +
 .../apache/phoenix/end2end/QueryTimeoutIT.java  |   94 +
 .../phoenix/end2end/QueryWithLimitIT.java       |    2 +-
 .../org/apache/phoenix/end2end/SequenceIT.java  |    1 +
 .../apache/phoenix/end2end/SkipScanQueryIT.java |   90 +-
 .../apache/phoenix/end2end/SortMergeJoinIT.java |  363 +-
 .../end2end/StatsCollectorAbstractIT.java       |   77 +
 .../phoenix/end2end/StatsCollectorIT.java       |  185 +-
 .../StatsCollectorWithSplitsAndMultiCFIT.java   |  256 +
 .../apache/phoenix/end2end/StoreNullsIT.java    |  226 +
 .../org/apache/phoenix/end2end/SubqueryIT.java  |  369 +-
 .../end2end/SubqueryUsingSortMergeJoinIT.java   |  906 +++
 .../end2end/TenantSpecificViewIndexIT.java      |    7 +-
 .../phoenix/end2end/ToDateFunctionIT.java       |  179 +
 .../phoenix/end2end/ToNumberFunctionIT.java     |    6 +-
 .../phoenix/end2end/TruncateFunctionIT.java     |    6 +-
 .../apache/phoenix/end2end/UpsertSelectIT.java  |   18 +-
 .../apache/phoenix/end2end/UpsertValuesIT.java  |   99 +-
 .../phoenix/end2end/VariableLengthPKIT.java     |   16 +-
 .../java/org/apache/phoenix/end2end/ViewIT.java |   61 +-
 .../phoenix/end2end/index/BaseIndexIT.java      |   30 -
 .../end2end/index/BaseMutableIndexIT.java       | 1212 ++-
 .../phoenix/end2end/index/DropViewIT.java       |    2 +-
 .../index/GlobalIndexOptimizationIT.java        |  469 ++
 .../end2end/index/GlobalMutableIndexIT.java     |   26 +
 .../phoenix/end2end/index/ImmutableIndexIT.java |   83 +-
 .../end2end/index/IndexExpressionIT.java        | 1267 +++
 .../phoenix/end2end/index/IndexMetadataIT.java  |   65 +-
 .../phoenix/end2end/index/IndexTestUtil.java    |    2 +-
 .../phoenix/end2end/index/LocalIndexIT.java     |  497 +-
 .../end2end/index/LocalMutableIndexIT.java      |   26 +
 .../phoenix/end2end/index/MutableIndexIT.java   | 1344 ----
 .../phoenix/end2end/index/SaltedIndexIT.java    |   51 +-
 .../phoenix/end2end/index/ViewIndexIT.java      |   17 +-
 .../phoenix/mapreduce/CsvBulkLoadToolIT.java    |   82 +-
 .../phoenix/trace/PhoenixTracingEndToEndIT.java |   32 +-
 .../apache/phoenix/trace/TracingTestUtil.java   |   17 +
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |   71 +-
 .../regionserver/IndexHalfStoreFileReader.java  |   55 +-
 .../IndexHalfStoreFileReaderGenerator.java      |   52 +-
 .../regionserver/IndexSplitTransaction.java     |    6 +-
 .../hbase/regionserver/LocalIndexMerger.java    |  122 +
 .../hbase/regionserver/LocalIndexSplitter.java  |   47 +-
 .../apache/phoenix/cache/ServerCacheClient.java |    4 +-
 .../apache/phoenix/compile/ColumnProjector.java |    2 +-
 .../phoenix/compile/CreateSequenceCompiler.java |   13 +-
 .../phoenix/compile/CreateTableCompiler.java    |    8 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |    8 +-
 .../phoenix/compile/ExpressionCompiler.java     |  271 +-
 .../phoenix/compile/ExpressionProjector.java    |    2 +-
 .../apache/phoenix/compile/FromCompiler.java    |  148 +-
 .../apache/phoenix/compile/GroupByCompiler.java |   16 +-
 .../apache/phoenix/compile/HavingCompiler.java  |    8 +-
 .../compile/IndexExpressionCompiler.java        |   53 +
 .../phoenix/compile/IndexStatementRewriter.java |   17 +-
 .../apache/phoenix/compile/JoinCompiler.java    |  655 +-
 .../apache/phoenix/compile/LimitCompiler.java   |    6 +-
 .../MutatingParallelIteratorFactory.java        |    4 +-
 .../apache/phoenix/compile/OrderByCompiler.java |    2 +-
 .../apache/phoenix/compile/PostDDLCompiler.java |    4 +-
 .../phoenix/compile/PostIndexDDLCompiler.java   |   47 +-
 .../phoenix/compile/ProjectionCompiler.java     |  111 +-
 .../apache/phoenix/compile/QueryCompiler.java   |  170 +-
 .../apache/phoenix/compile/RowProjector.java    |   72 +-
 .../org/apache/phoenix/compile/ScanRanges.java  |    7 +-
 .../apache/phoenix/compile/SequenceManager.java |   54 -
 .../compile/SequenceValueExpression.java        |   83 +
 .../phoenix/compile/StatementContext.java       |   66 +-
 .../phoenix/compile/SubqueryRewriter.java       |    2 +-
 .../apache/phoenix/compile/TraceQueryPlan.java  |  220 +
 .../compile/TupleProjectionCompiler.java        |  214 +
 .../apache/phoenix/compile/UpsertCompiler.java  |   42 +-
 .../apache/phoenix/compile/WhereCompiler.java   |   12 +-
 .../apache/phoenix/compile/WhereOptimizer.java  |   28 +-
 .../coprocessor/BaseScannerRegionObserver.java  |  240 +-
 .../GroupedAggregateRegionObserver.java         |  163 +-
 .../coprocessor/HashJoinRegionScanner.java      |   72 +-
 .../coprocessor/MetaDataEndpointImpl.java       |  284 +-
 .../phoenix/coprocessor/MetaDataProtocol.java   |   65 +-
 .../coprocessor/MetaDataRegionObserver.java     |    8 +-
 .../phoenix/coprocessor/ScanRegionObserver.java |  181 +-
 .../coprocessor/SequenceRegionObserver.java     |   38 +-
 .../phoenix/coprocessor/SuffixFilter.java       |   17 +
 .../UngroupedAggregateRegionObserver.java       |  330 +-
 .../generated/PGuidePostsProtos.java            |  732 ++
 .../coprocessor/generated/PTableProtos.java     |  517 +-
 .../phoenix/exception/SQLExceptionCode.java     |   49 +-
 .../ValueTypeIncompatibleException.java         |    2 +-
 .../apache/phoenix/execute/AggregatePlan.java   |   59 +-
 .../apache/phoenix/execute/BaseQueryPlan.java   |    5 +-
 .../phoenix/execute/ClientAggregatePlan.java    |    2 +-
 .../apache/phoenix/execute/HashJoinPlan.java    |   71 +-
 .../apache/phoenix/execute/MutationState.java   |    4 +-
 .../org/apache/phoenix/execute/ScanPlan.java    |    3 +-
 .../phoenix/execute/SortMergeJoinPlan.java      |   12 +-
 .../apache/phoenix/execute/TupleProjector.java  |   18 +-
 .../phoenix/expression/AndExpression.java       |    6 +-
 .../phoenix/expression/AndOrExpression.java     |    7 +-
 .../expression/ArithmeticExpression.java        |    4 +-
 .../expression/ArrayConstructorExpression.java  |   31 +-
 .../expression/BaseAddSubtractExpression.java   |    2 +-
 .../expression/BaseCompoundExpression.java      |   11 -
 .../phoenix/expression/BaseExpression.java      |   15 +-
 .../expression/BaseSingleExpression.java        |    6 +-
 .../expression/BaseTerminalExpression.java      |    8 -
 .../phoenix/expression/CaseExpression.java      |   28 +-
 .../phoenix/expression/CoerceExpression.java    |   25 +-
 .../phoenix/expression/ColumnExpression.java    |    2 +-
 .../expression/ComparisonExpression.java        |  155 +-
 .../phoenix/expression/DateAddExpression.java   |   22 +-
 .../expression/DateSubtractExpression.java      |   22 +-
 .../expression/DecimalAddExpression.java        |   16 +-
 .../expression/DecimalDivideExpression.java     |   16 +-
 .../expression/DecimalMultiplyExpression.java   |   16 +-
 .../expression/DecimalSubtractExpression.java   |   19 +-
 .../phoenix/expression/DelegateExpression.java  |    2 +-
 .../apache/phoenix/expression/Determinism.java  |    2 +-
 .../phoenix/expression/DivideExpression.java    |    2 +-
 .../phoenix/expression/DoubleAddExpression.java |   11 +-
 .../expression/DoubleDivideExpression.java      |   11 +-
 .../expression/DoubleMultiplyExpression.java    |   11 +-
 .../expression/DoubleSubtractExpression.java    |   11 +-
 .../phoenix/expression/ExpressionType.java      |   22 +-
 .../phoenix/expression/InListExpression.java    |   11 +-
 .../phoenix/expression/IsNullExpression.java    |   20 +-
 .../phoenix/expression/LikeExpression.java      |   21 +-
 .../phoenix/expression/LiteralExpression.java   |   38 +-
 .../phoenix/expression/LongAddExpression.java   |   13 +-
 .../expression/LongDivideExpression.java        |   13 +-
 .../expression/LongMultiplyExpression.java      |   11 +-
 .../expression/LongSubtractExpression.java      |   14 +-
 .../phoenix/expression/ModulusExpression.java   |   29 +-
 .../phoenix/expression/MultiplyExpression.java  |    2 +-
 .../phoenix/expression/NotExpression.java       |   19 +-
 .../expression/ProjectedColumnExpression.java   |   43 +-
 .../expression/RowKeyColumnExpression.java      |    6 +-
 .../phoenix/expression/RowKeyExpression.java    |   13 +-
 .../RowValueConstructorExpression.java          |    9 +-
 .../expression/StringConcatExpression.java      |    9 +-
 .../expression/TimestampAddExpression.java      |   28 +-
 .../expression/TimestampSubtractExpression.java |   28 +-
 .../expression/aggregator/BaseAggregator.java   |    7 +
 .../aggregator/BaseDecimalStddevAggregator.java |   11 +-
 .../aggregator/BaseStddevAggregator.java        |   12 +-
 .../expression/aggregator/CountAggregator.java  |    5 +-
 .../aggregator/DecimalSumAggregator.java        |    9 +-
 .../DistinctCountClientAggregator.java          |    9 +-
 .../DistinctValueClientAggregator.java          |    4 +-
 .../DistinctValueWithCountClientAggregator.java |    5 +-
 .../DistinctValueWithCountServerAggregator.java |    5 +-
 .../aggregator/DoubleSumAggregator.java         |    9 +-
 .../FirstLastValueBaseClientAggregator.java     |    5 +-
 .../FirstLastValueServerAggregator.java         |    5 +-
 .../expression/aggregator/IntSumAggregator.java |    5 +-
 .../aggregator/LongSumAggregator.java           |    5 +-
 .../aggregator/NumberSumAggregator.java         |    7 +-
 .../aggregator/PercentRankClientAggregator.java |    7 +-
 .../aggregator/PercentileClientAggregator.java  |    7 +-
 .../PercentileDiscClientAggregator.java         |    5 +-
 .../aggregator/UnsignedIntSumAggregator.java    |    5 +-
 .../aggregator/UnsignedLongSumAggregator.java   |    5 +-
 .../function/ArrayAllComparisonExpression.java  |    2 +-
 .../function/ArrayAnyComparisonExpression.java  |   20 +-
 .../function/ArrayElemRefExpression.java        |   83 +
 .../expression/function/ArrayIndexFunction.java |   18 +-
 .../function/ArrayLengthFunction.java           |   15 +-
 .../function/AvgAggregateFunction.java          |   13 +-
 .../expression/function/CeilDateExpression.java |    2 +-
 .../function/CeilDecimalExpression.java         |   20 +-
 .../expression/function/CeilFunction.java       |   12 +-
 .../function/CeilTimestampExpression.java       |   21 +-
 .../expression/function/CoalesceFunction.java   |    2 +-
 .../function/ConvertTimezoneFunction.java       |   16 +-
 .../function/CountAggregateFunction.java        |    5 +-
 .../function/CurrentDateFunction.java           |    9 +-
 .../function/CurrentTimeFunction.java           |    9 +-
 .../expression/function/DecodeFunction.java     |    8 +-
 ...elegateConstantToCountAggregateFunction.java |    6 +-
 .../DistinctCountAggregateFunction.java         |    9 +-
 .../DistinctValueAggregateFunction.java         |    8 +-
 .../expression/function/EncodeFunction.java     |   10 +-
 .../function/ExternalSqlTypeIdFunction.java     |   11 +-
 .../function/FirstLastValueBaseFunction.java    |    2 +-
 .../expression/function/FirstValueFunction.java |    4 +-
 .../function/FloorDateExpression.java           |   12 +-
 .../function/FloorDecimalExpression.java        |   20 +-
 .../expression/function/FloorFunction.java      |   12 +-
 .../expression/function/FunctionExpression.java |    2 +-
 .../function/IndexStateNameFunction.java        |    8 +-
 .../function/InlineArrayElemRefExpression.java  |   73 -
 .../expression/function/InvertFunction.java     |    2 +-
 .../expression/function/LTrimFunction.java      |    7 +-
 .../expression/function/LastValueFunction.java  |    4 +-
 .../expression/function/LengthFunction.java     |   13 +-
 .../expression/function/LowerFunction.java      |    9 +-
 .../expression/function/LpadFunction.java       |   17 +-
 .../expression/function/MD5Function.java        |    5 +-
 .../function/MaxAggregateFunction.java          |   15 +-
 .../function/MinAggregateFunction.java          |   14 +-
 .../expression/function/NthValueFunction.java   |    7 +-
 .../function/PercentRankAggregateFunction.java  |    8 +-
 .../PercentileContAggregateFunction.java        |   12 +-
 .../PercentileDiscAggregateFunction.java        |   10 +-
 .../expression/function/PrefixFunction.java     |    2 +-
 .../expression/function/RTrimFunction.java      |    7 +-
 .../expression/function/RandomFunction.java     |  133 +
 .../function/RegexpReplaceFunction.java         |   21 +-
 .../function/RegexpSplitFunction.java           |   22 +-
 .../function/RegexpSubstrFunction.java          |   16 +-
 .../expression/function/ReverseFunction.java    |    7 +-
 .../function/RoundDateExpression.java           |   12 +-
 .../function/RoundDecimalExpression.java        |   32 +-
 .../expression/function/RoundFunction.java      |   12 +-
 .../function/RoundTimestampExpression.java      |   21 +-
 .../function/SQLIndexTypeFunction.java          |    8 +-
 .../function/SQLTableTypeFunction.java          |    8 +-
 .../function/SQLViewTypeFunction.java           |    8 +-
 .../expression/function/ScalarFunction.java     |    9 +
 .../function/SingleAggregateFunction.java       |    2 +-
 .../function/SqlTypeNameFunction.java           |    8 +-
 .../expression/function/StddevPopFunction.java  |    9 +-
 .../expression/function/StddevSampFunction.java |    9 +-
 .../expression/function/SubstrFunction.java     |   16 +-
 .../function/SumAggregateFunction.java          |   69 +-
 .../function/TimezoneOffsetFunction.java        |   16 +-
 .../expression/function/ToCharFunction.java     |   11 +-
 .../expression/function/ToDateFunction.java     |  120 +-
 .../expression/function/ToNumberFunction.java   |   13 +-
 .../expression/function/ToTimeFunction.java     |   63 +
 .../function/ToTimestampFunction.java           |   63 +
 .../expression/function/TrimFunction.java       |    7 +-
 .../expression/function/TruncFunction.java      |   14 +-
 .../expression/function/UpperFunction.java      |    9 +-
 .../visitor/BaseExpressionVisitor.java          |  127 +-
 .../visitor/CloneExpressionVisitor.java         |  195 +
 .../expression/visitor/ExpressionVisitor.java   |   37 +-
 .../visitor/KeyValueExpressionVisitor.java      |    2 +-
 .../visitor/RowKeyExpressionVisitor.java        |    2 +-
 .../visitor/SingleAggregateFunctionVisitor.java |    2 +-
 .../StatelessTraverseAllExpressionVisitor.java  |  181 +
 .../StatelessTraverseNoExpressionVisitor.java   |  181 +
 .../visitor/TraverseAllExpressionVisitor.java   |    6 +-
 .../visitor/TraverseNoExpressionVisitor.java    |    4 +-
 .../filter/MultiKeyValueComparisonFilter.java   |    5 +-
 .../filter/SingleKeyValueComparisonFilter.java  |    3 +-
 .../apache/phoenix/filter/SkipScanFilter.java   |   31 +-
 .../hbase/index/IndexRegionSplitPolicy.java     |    4 +
 .../org/apache/phoenix/hbase/index/Indexer.java |  225 +-
 .../apache/phoenix/hbase/index/ValueGetter.java |    2 +
 .../index/covered/data/LazyValueGetter.java     |    5 +
 .../hbase/index/master/IndexMasterObserver.java |   47 +
 .../hbase/index/util/IndexManagementUtil.java   |   18 -
 .../hbase/index/write/IndexWriterUtils.java     |    1 -
 .../apache/phoenix/index/IndexMaintainer.java   |  328 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |   22 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |    2 +-
 .../index/PhoenixIndexFailurePolicy.java        |   23 +-
 .../phoenix/iterate/BaseResultIterators.java    |   14 +-
 .../phoenix/iterate/ChunkedResultIterator.java  |   24 +
 .../apache/phoenix/iterate/ExplainTable.java    |    5 +-
 .../FilterAggregatingResultIterator.java        |    4 +-
 .../phoenix/iterate/FilterResultIterator.java   |    4 +-
 .../phoenix/iterate/OrderedResultIterator.java  |    4 +-
 .../iterate/RegionScannerResultIterator.java    |   36 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   66 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   97 +-
 .../phoenix/jdbc/PhoenixParameterMetaData.java  |    2 +-
 .../phoenix/jdbc/PhoenixPreparedStatement.java  |   10 +-
 .../apache/phoenix/jdbc/PhoenixResultSet.java   |   84 +-
 .../phoenix/jdbc/PhoenixResultSetMetaData.java  |    9 +-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |   74 +-
 .../apache/phoenix/join/HashCacheClient.java    |   58 +-
 .../org/apache/phoenix/join/HashJoinInfo.java   |   25 +-
 .../phoenix/mapreduce/CsvBulkLoadTool.java      |   85 +-
 .../phoenix/mapreduce/CsvToKeyValueMapper.java  |    3 +-
 .../phoenix/mapreduce/PhoenixInputFormat.java   |  117 +
 .../phoenix/mapreduce/PhoenixInputSplit.java    |  129 +
 .../mapreduce/PhoenixOutputCommitter.java       |   54 +
 .../phoenix/mapreduce/PhoenixOutputFormat.java  |   62 +
 .../phoenix/mapreduce/PhoenixRecordReader.java  |  142 +
 .../phoenix/mapreduce/PhoenixRecordWriter.java  |   91 +
 .../util/ColumnInfoToStringEncoderDecoder.java  |   65 +
 .../phoenix/mapreduce/util/ConnectionUtil.java  |   58 +
 .../util/PhoenixConfigurationUtil.java          |  316 +
 .../mapreduce/util/PhoenixMapReduceUtil.java    |   99 +
 .../apache/phoenix/optimize/QueryOptimizer.java |  196 +-
 .../phoenix/parse/AddColumnStatement.java       |   14 +-
 .../org/apache/phoenix/parse/AddParseNode.java  |    6 +
 .../AggregateFunctionWithinGroupParseNode.java  |   52 +
 .../org/apache/phoenix/parse/AliasedNode.java   |   35 +
 .../org/apache/phoenix/parse/AndParseNode.java  |   14 +
 .../phoenix/parse/ArithmeticParseNode.java      |   15 +
 .../parse/ArrayAllAnyComparisonNode.java        |   49 +
 .../phoenix/parse/ArrayAllComparisonNode.java   |    3 +-
 .../phoenix/parse/ArrayAnyComparisonNode.java   |    3 +-
 .../phoenix/parse/ArrayConstructorNode.java     |   17 +
 .../apache/phoenix/parse/ArrayElemRefNode.java  |   11 +
 .../apache/phoenix/parse/BetweenParseNode.java  |   40 +-
 .../org/apache/phoenix/parse/BindParseNode.java |   30 +-
 .../org/apache/phoenix/parse/BindTableNode.java |    8 +
 .../org/apache/phoenix/parse/CaseParseNode.java |   20 +
 .../org/apache/phoenix/parse/CastParseNode.java |   87 +-
 .../org/apache/phoenix/parse/CeilParseNode.java |   12 +-
 .../org/apache/phoenix/parse/ColumnDef.java     |   53 +-
 .../org/apache/phoenix/parse/ColumnName.java    |    2 +-
 .../apache/phoenix/parse/ColumnParseNode.java   |   52 +-
 .../phoenix/parse/ComparisonParseNode.java      |   10 +
 .../apache/phoenix/parse/CompoundParseNode.java |   33 +-
 .../apache/phoenix/parse/ConcreteTableNode.java |   19 +
 .../phoenix/parse/CreateIndexStatement.java     |   10 +-
 .../apache/phoenix/parse/DerivedTableNode.java  |   27 +
 .../phoenix/parse/DistinctCountParseNode.java   |   16 +
 .../apache/phoenix/parse/DivideParseNode.java   |    7 +
 .../apache/phoenix/parse/ExistsParseNode.java   |   31 +
 .../phoenix/parse/FamilyWildcardParseNode.java  |   30 +
 .../apache/phoenix/parse/FloorParseNode.java    |    8 +-
 .../apache/phoenix/parse/FunctionParseNode.java |  152 +-
 .../java/org/apache/phoenix/parse/HintNode.java |   52 +-
 .../apache/phoenix/parse/InListParseNode.java   |   41 +
 .../org/apache/phoenix/parse/InParseNode.java   |   44 +-
 .../parse/IndexExpressionParseNodeRewriter.java |  102 +
 .../phoenix/parse/IndexKeyConstraint.java       |   12 +-
 .../apache/phoenix/parse/IsNullParseNode.java   |   32 +
 .../org/apache/phoenix/parse/JoinTableNode.java |   51 +
 .../org/apache/phoenix/parse/LikeParseNode.java |   38 +
 .../org/apache/phoenix/parse/LimitNode.java     |   29 +
 .../apache/phoenix/parse/LiteralParseNode.java  |   46 +-
 .../apache/phoenix/parse/ModulusParseNode.java  |    6 +
 .../apache/phoenix/parse/MultiplyParseNode.java |    6 +
 .../org/apache/phoenix/parse/NamedNode.java     |    8 +-
 .../apache/phoenix/parse/NamedParseNode.java    |   43 +-
 .../apache/phoenix/parse/NamedTableNode.java    |   38 +
 .../org/apache/phoenix/parse/NotParseNode.java  |    9 +
 .../org/apache/phoenix/parse/OrParseNode.java   |   15 +
 .../org/apache/phoenix/parse/OrderByNode.java   |   34 +-
 .../phoenix/parse/OuterJoinParseNode.java       |   47 -
 .../org/apache/phoenix/parse/ParseNode.java     |   11 +
 .../apache/phoenix/parse/ParseNodeFactory.java  |  100 +-
 .../apache/phoenix/parse/ParseNodeRewriter.java |    2 +-
 .../apache/phoenix/parse/RoundParseNode.java    |   11 +-
 .../parse/RowValueConstructorParseNode.java     |   16 +
 .../apache/phoenix/parse/SelectStatement.java   |   99 +
 .../phoenix/parse/SequenceValueParseNode.java   |   39 +
 .../phoenix/parse/StringConcatParseNode.java    |   14 +
 .../apache/phoenix/parse/SubqueryParseNode.java |   36 +
 .../apache/phoenix/parse/SubtractParseNode.java |    7 +
 .../org/apache/phoenix/parse/TableName.java     |   31 +-
 .../org/apache/phoenix/parse/TableNode.java     |   10 +
 .../phoenix/parse/TableWildcardParseNode.java   |   36 +
 .../apache/phoenix/parse/ToCharParseNode.java   |    9 +-
 .../apache/phoenix/parse/ToDateParseNode.java   |   15 +-
 .../apache/phoenix/parse/ToNumberParseNode.java |    8 +-
 .../apache/phoenix/parse/ToTimeParseNode.java   |   48 +
 .../phoenix/parse/ToTimestampParseNode.java     |   48 +
 .../apache/phoenix/parse/TraceStatement.java    |   43 +
 .../apache/phoenix/parse/WildcardParseNode.java |   36 +-
 .../phoenix/query/ConnectionQueryServices.java  |    5 +-
 .../query/ConnectionQueryServicesImpl.java      |  742 +-
 .../query/ConnectionlessQueryServicesImpl.java  |   11 +-
 .../query/DelegateConnectionQueryServices.java  |    9 +-
 .../org/apache/phoenix/query/HTableFactory.java |    3 +-
 .../apache/phoenix/query/MetaDataMutated.java   |    2 +-
 .../apache/phoenix/query/QueryConstants.java    |   59 +-
 .../org/apache/phoenix/query/QueryServices.java |   44 +-
 .../phoenix/query/QueryServicesOptions.java     |  129 +-
 .../schema/ArgumentTypeMismatchException.java   |   17 +
 .../org/apache/phoenix/schema/ColumnRef.java    |   10 +-
 .../apache/phoenix/schema/DelegateColumn.java   |    6 +
 .../apache/phoenix/schema/DelegateDatum.java    |    2 +
 .../apache/phoenix/schema/DelegateTable.java    |   16 +-
 .../ExecuteQueryNotApplicableException.java     |    7 +-
 .../ExecuteUpdateNotApplicableException.java    |    7 +-
 .../apache/phoenix/schema/KeyValueSchema.java   |   17 +-
 .../phoenix/schema/LocalIndexDataColumnRef.java |   28 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  815 +-
 .../apache/phoenix/schema/PArrayDataType.java   |  660 --
 .../java/org/apache/phoenix/schema/PColumn.java |    3 +
 .../org/apache/phoenix/schema/PColumnImpl.java  |   29 +-
 .../org/apache/phoenix/schema/PDataType.java    | 7452 ------------------
 .../org/apache/phoenix/schema/PDateColumn.java  |    5 +-
 .../java/org/apache/phoenix/schema/PDatum.java  |    1 +
 .../org/apache/phoenix/schema/PIndexState.java  |    5 +-
 .../apache/phoenix/schema/PIntegerColumn.java   |    4 +-
 .../org/apache/phoenix/schema/PLongColumn.java  |    4 +-
 .../apache/phoenix/schema/PMetaDataImpl.java    |    6 +-
 .../apache/phoenix/schema/PStringColumn.java    |    7 +-
 .../java/org/apache/phoenix/schema/PTable.java  |   58 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  219 +-
 .../org/apache/phoenix/schema/PTableType.java   |    2 +-
 .../org/apache/phoenix/schema/PhoenixArray.java |  551 --
 .../apache/phoenix/schema/ProjectedColumn.java  |   59 +
 .../org/apache/phoenix/schema/RowKeySchema.java |    2 +-
 .../org/apache/phoenix/schema/SaltingUtil.java  |    3 +-
 .../org/apache/phoenix/schema/Sequence.java     |   40 +-
 .../apache/phoenix/schema/TableProperty.java    |  114 +
 .../org/apache/phoenix/schema/TableRef.java     |   41 +-
 .../phoenix/schema/TypeMismatchException.java   |    1 +
 .../org/apache/phoenix/schema/ValueSchema.java  |    3 +-
 .../phoenix/schema/stats/GuidePostsInfo.java    |  176 +-
 .../schema/stats/StatisticsCollector.java       |  103 +-
 .../phoenix/schema/stats/StatisticsScanner.java |   30 +-
 .../phoenix/schema/stats/StatisticsUtil.java    |   51 +-
 .../phoenix/schema/stats/StatisticsWriter.java  |  112 +-
 .../phoenix/schema/tuple/ValueGetterTuple.java  |   93 +
 .../phoenix/schema/types/PArrayDataType.java    |  673 ++
 .../apache/phoenix/schema/types/PBinary.java    |  194 +
 .../phoenix/schema/types/PBinaryArray.java      |  108 +
 .../apache/phoenix/schema/types/PBoolean.java   |  140 +
 .../phoenix/schema/types/PBooleanArray.java     |  108 +
 .../org/apache/phoenix/schema/types/PChar.java  |  209 +
 .../apache/phoenix/schema/types/PCharArray.java |  108 +
 .../apache/phoenix/schema/types/PDataType.java  | 1182 +++
 .../phoenix/schema/types/PDataTypeFactory.java  |  118 +
 .../org/apache/phoenix/schema/types/PDate.java  |  196 +
 .../apache/phoenix/schema/types/PDateArray.java |  109 +
 .../apache/phoenix/schema/types/PDecimal.java   |  405 +
 .../phoenix/schema/types/PDecimalArray.java     |  110 +
 .../apache/phoenix/schema/types/PDouble.java    |  311 +
 .../phoenix/schema/types/PDoubleArray.java      |  108 +
 .../org/apache/phoenix/schema/types/PFloat.java |  308 +
 .../phoenix/schema/types/PFloatArray.java       |  109 +
 .../apache/phoenix/schema/types/PInteger.java   |  275 +
 .../phoenix/schema/types/PIntegerArray.java     |  109 +
 .../org/apache/phoenix/schema/types/PLong.java  |  331 +
 .../apache/phoenix/schema/types/PLongArray.java |  109 +
 .../apache/phoenix/schema/types/PSmallint.java  |  259 +
 .../phoenix/schema/types/PSmallintArray.java    |  109 +
 .../org/apache/phoenix/schema/types/PTime.java  |  141 +
 .../apache/phoenix/schema/types/PTimeArray.java |  110 +
 .../apache/phoenix/schema/types/PTimestamp.java |  209 +
 .../phoenix/schema/types/PTimestampArray.java   |  109 +
 .../apache/phoenix/schema/types/PTinyint.java   |  253 +
 .../phoenix/schema/types/PTinyintArray.java     |  109 +
 .../phoenix/schema/types/PUnsignedDate.java     |  164 +
 .../schema/types/PUnsignedDateArray.java        |  110 +
 .../phoenix/schema/types/PUnsignedDouble.java   |  158 +
 .../schema/types/PUnsignedDoubleArray.java      |  112 +
 .../phoenix/schema/types/PUnsignedFloat.java    |  152 +
 .../schema/types/PUnsignedFloatArray.java       |  109 +
 .../phoenix/schema/types/PUnsignedInt.java      |  178 +
 .../phoenix/schema/types/PUnsignedIntArray.java |  109 +
 .../phoenix/schema/types/PUnsignedLong.java     |  193 +
 .../schema/types/PUnsignedLongArray.java        |  109 +
 .../phoenix/schema/types/PUnsignedSmallint.java |  159 +
 .../schema/types/PUnsignedSmallintArray.java    |  110 +
 .../phoenix/schema/types/PUnsignedTime.java     |  120 +
 .../schema/types/PUnsignedTimeArray.java        |  109 +
 .../schema/types/PUnsignedTimestamp.java        |  148 +
 .../schema/types/PUnsignedTimestampArray.java   |  110 +
 .../phoenix/schema/types/PUnsignedTinyint.java  |  155 +
 .../schema/types/PUnsignedTinyintArray.java     |  110 +
 .../apache/phoenix/schema/types/PVarbinary.java |  176 +
 .../phoenix/schema/types/PVarbinaryArray.java   |  109 +
 .../apache/phoenix/schema/types/PVarchar.java   |  163 +
 .../phoenix/schema/types/PVarcharArray.java     |  109 +
 .../phoenix/schema/types/PhoenixArray.java      |  644 ++
 .../org/apache/phoenix/trace/util/Tracing.java  |   83 +-
 .../java/org/apache/phoenix/util/ByteUtil.java  |    2 +-
 .../apache/phoenix/util/CSVCommonsLoader.java   |    2 +-
 .../org/apache/phoenix/util/ColumnInfo.java     |    2 +-
 .../java/org/apache/phoenix/util/DateUtil.java  |  287 +-
 .../org/apache/phoenix/util/ExpressionUtil.java |    2 +-
 .../java/org/apache/phoenix/util/IndexUtil.java |  121 +-
 .../java/org/apache/phoenix/util/JDBCUtil.java  |   19 +
 .../org/apache/phoenix/util/MetaDataUtil.java   |   15 +-
 .../org/apache/phoenix/util/NumberUtil.java     |    2 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java |   52 +-
 .../java/org/apache/phoenix/util/QueryUtil.java |   41 +-
 .../java/org/apache/phoenix/util/ScanUtil.java  |    6 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |   44 +-
 .../org/apache/phoenix/util/ServerUtil.java     |   13 +-
 .../org/apache/phoenix/util/StringUtil.java     |    5 +
 .../org/apache/phoenix/util/UpgradeUtil.java    |  194 +-
 .../phoenix/util/csv/CsvUpsertExecutor.java     |   67 +-
 .../util/csv/StringToArrayConverter.java        |   26 +-
 .../arithmetic/ArithmeticOperationTest.java     |  318 -
 .../phoenix/compile/LimitCompilerTest.java      |   14 +-
 .../phoenix/compile/QueryCompilerTest.java      |  138 +-
 .../compile/ScanRangesIntersectTest.java        |   11 +-
 .../apache/phoenix/compile/ScanRangesTest.java  |  286 +-
 .../compile/StatementHintsCompilationTest.java  |    2 +-
 .../TenantSpecificViewIndexCompileTest.java     |    3 +-
 .../phoenix/compile/WhereCompilerTest.java      |  157 +-
 .../phoenix/compile/WhereOptimizerTest.java     |  313 +-
 .../expression/ArithmeticOperationTest.java     |  303 +
 .../expression/CoerceExpressionTest.java        |   25 +-
 .../expression/ColumnExpressionTest.java        |   26 +-
 .../phoenix/expression/ILikeExpressionTest.java |    2 +-
 .../phoenix/expression/LikeExpressionTest.java  |    2 +-
 .../RoundFloorCeilExpressionsTest.java          |  243 +-
 .../expression/SortOrderExpressionTest.java     |  124 +-
 .../function/ExternalSqlTypeIdFunctionTest.java |   36 +-
 .../phoenix/filter/SkipScanBigFilterTest.java   |  717 ++
 .../filter/SkipScanFilterIntersectTest.java     |  405 +-
 .../phoenix/filter/SkipScanFilterTest.java      |  114 +-
 .../phoenix/index/IndexMaintainerTest.java      |   14 +-
 .../iterate/AggregateResultScannerTest.java     |   21 +-
 .../jdbc/PhoenixPreparedStatementTest.java      |   34 +-
 .../mapreduce/CsvBulkImportUtilTest.java        |    4 +-
 .../mapreduce/CsvToKeyValueMapperTest.java      |   16 +-
 .../ColumnInfoToStringEncoderDecoderTest.java   |   60 +
 .../util/PhoenixConfigurationUtilTest.java      |  124 +
 .../phoenix/parse/BuiltInFunctionInfoTest.java  |  122 +
 .../apache/phoenix/parse/QueryParserTest.java   |  346 +-
 .../query/BaseConnectionlessQueryTest.java      |   23 +
 .../java/org/apache/phoenix/query/BaseTest.java |  411 +-
 .../phoenix/query/ConnectionlessTest.java       |   20 +-
 .../phoenix/query/KeyRangeCoalesceTest.java     |   59 +-
 .../phoenix/query/KeyRangeIntersectTest.java    |   31 +-
 .../apache/phoenix/query/KeyRangeUnionTest.java |   33 +-
 .../query/ParallelIteratorsSplitTest.java       |   13 +-
 .../org/apache/phoenix/query/QueryPlanTest.java |    4 +-
 .../phoenix/schema/PDataTypeForArraysTest.java  |  500 +-
 .../apache/phoenix/schema/PDataTypeTest.java    | 1678 ----
 .../apache/phoenix/schema/RowKeySchemaTest.java |    1 +
 .../phoenix/schema/RowKeyValueAccessorTest.java |    1 +
 .../apache/phoenix/schema/SortOrderTest.java    |   23 +-
 .../apache/phoenix/schema/ValueBitSetTest.java  |    1 +
 .../phoenix/schema/types/PDataTypeTest.java     | 1755 +++++
 .../org/apache/phoenix/util/ByteUtilTest.java   |    8 +-
 .../org/apache/phoenix/util/DateUtilTest.java   |  116 +-
 .../org/apache/phoenix/util/IndexUtilTest.java  |  150 +
 .../org/apache/phoenix/util/JDBCUtilTest.java   |   36 +
 .../apache/phoenix/util/PhoenixRuntimeTest.java |   60 +-
 .../org/apache/phoenix/util/QueryUtilTest.java  |    2 +-
 .../org/apache/phoenix/util/ScanUtilTest.java   |  138 +-
 .../java/org/apache/phoenix/util/TestUtil.java  |   55 +-
 .../phoenix/util/csv/CsvUpsertExecutorTest.java |   13 +-
 .../util/csv/StringToArrayConverterTest.java    |    7 +-
 .../phoenix/flume/RegexEventSerializerIT.java   |    2 +-
 .../flume/serializer/RegexEventSerializer.java  |    2 +-
 .../phoenix/pig/PhoenixHBaseLoaderIT.java       |    3 +-
 .../phoenix/pig/PhoenixPigConfigurationIT.java  |  109 -
 .../apache/phoenix/pig/PhoenixHBaseLoader.java  |   50 +-
 .../apache/phoenix/pig/PhoenixHBaseStorage.java |  220 +-
 .../phoenix/pig/PhoenixPigConfiguration.java    |  340 -
 .../phoenix/pig/hadoop/PhoenixInputFormat.java  |  142 -
 .../phoenix/pig/hadoop/PhoenixInputSplit.java   |  134 -
 .../pig/hadoop/PhoenixOutputCommitter.java      |  111 -
 .../phoenix/pig/hadoop/PhoenixOutputFormat.java |   94 -
 .../phoenix/pig/hadoop/PhoenixRecord.java       |  112 -
 .../phoenix/pig/hadoop/PhoenixRecordReader.java |  142 -
 .../phoenix/pig/hadoop/PhoenixRecordWriter.java |   83 -
 .../util/ColumnInfoToStringEncoderDecoder.java  |   69 -
 .../phoenix/pig/util/PhoenixPigSchemaUtil.java  |   29 +-
 .../pig/util/QuerySchemaParserFunction.java     |   21 +-
 .../pig/util/SqlQueryToColumnInfoFunction.java  |   51 +-
 .../org/apache/phoenix/pig/util/TypeUtil.java   |  141 +-
 .../pig/writable/PhoenixPigDBWritable.java      |  121 +
 .../pig/PhoenixPigConfigurationTest.java        |   86 -
 .../ColumnInfoToStringEncoderDecoderTest.java   |   61 -
 .../pig/util/PhoenixPigSchemaUtilTest.java      |   17 +-
 .../pig/util/QuerySchemaParserFunctionTest.java |   22 +-
 .../util/SqlQueryToColumnInfoFunctionTest.java  |   22 +-
 phoenix-protocol/src/main/PGuidePosts.proto     |   10 +
 phoenix-protocol/src/main/PTable.proto          |    7 +-
 pom.xml                                         |    4 +-
 600 files changed, 37855 insertions(+), 21875 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9d24a94/phoenix-core/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9d24a94/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 2c4a578,a57250e..64024c4
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@@ -21,10 -21,11 +21,12 @@@ import java.sql.SQLException
  import java.sql.SQLFeatureNotSupportedException;
  import java.util.ArrayList;
  import java.util.Collections;
+ import java.util.HashMap;
  import java.util.Iterator;
  import java.util.List;
+ import java.util.Map;
  
 +import com.google.common.base.Preconditions;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.client.HTableInterface;
  import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@@ -316,9 -324,9 +325,9 @@@ public class FromCompiler 
          // on Windows because the millis timestamp granularity is so bad we sometimes won't
          // get the data back that we just upsert.
          private final int tsAddition;
-         
+ 
          private BaseColumnResolver(PhoenixConnection connection, int tsAddition) {
 -        	this.connection = connection;
 +            this.connection = Preconditions.checkNotNull(connection);
              this.client = connection == null ? null : new MetaDataClient(connection);
              this.tsAddition = tsAddition;
          }
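Editorial note (an observation, not part of the merge): once the constructor
argument passes Preconditions.checkNotNull, the null guard on the following line
can never take its null branch, so the pair could be simplified to:

this.connection = Preconditions.checkNotNull(connection);
this.client = new MetaDataClient(connection); // connection is non-null here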

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9d24a94/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9d24a94/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9d24a94/pom.xml
----------------------------------------------------------------------


[14/50] [abbrv] phoenix git commit: PHOENIX-1620 Add API for getting tenant ID from an HBase row of a Phoenix table

Posted by ma...@apache.org.
PHOENIX-1620 Add API for getting tenant ID from an HBase row of a Phoenix table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f3c675bf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f3c675bf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f3c675bf

Branch: refs/heads/calcite
Commit: f3c675bf735d7d4e4534433f3406af15360ed5d9
Parents: 2d77033
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Feb 6 11:14:26 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri Feb 6 14:59:01 2015 -0800

----------------------------------------------------------------------
 .../phoenix/end2end/PhoenixRuntimeIT.java       | 138 +++++++++++++++++++
 .../org/apache/phoenix/util/PhoenixRuntime.java |  39 +++++-
 .../org/apache/phoenix/util/SchemaUtil.java     |   4 +
 .../java/org/apache/phoenix/query/BaseTest.java |  20 ++-
 .../apache/phoenix/util/PhoenixRuntimeTest.java |  58 ++++++++
 5 files changed, 255 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3c675bf/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
new file mode 100644
index 0000000..234ea1c
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.tuple.ResultTuple;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class PhoenixRuntimeIT extends BaseHBaseManagedTimeIT {
+    private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter, String[] tenantIds) throws IOException {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        Scan scan = new Scan();
+        scan.setFilter(filter);
+        ResultScanner scanner = htable.getScanner(scan);
+        Result result = null;
+        ResultTuple tuple = new ResultTuple();
+        List<String> actualTenantIds = Lists.newArrayListWithExpectedSize(tenantIds.length);
+        List<String> expectedTenantIds = Arrays.asList(tenantIds);
+        while ((result = scanner.next()) != null) {
+            tuple.setResult(result);
+            e.evaluate(tuple, ptr);
+            String tenantId = (String)PVarchar.INSTANCE.toObject(ptr);
+            actualTenantIds.add(tenantId == null ? "" : tenantId);
+        }
+        // Need to sort because of salting
+        Collections.sort(actualTenantIds);
+        assertEquals(expectedTenantIds, actualTenantIds);
+    }
+    
+    @Test
+    public void testGetTenantIdExpressionForSaltedTable() throws Exception {
+        testGetTenantIdExpression(true);
+    }
+    
+    @Test
+    public void testGetTenantIdExpressionForUnsaltedTable() throws Exception {
+        testGetTenantIdExpression(false);
+    }
+    
+    private static Filter getUserTableAndViewsFilter() {
+        SingleColumnValueFilter tableFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_TYPE_BYTES, CompareOp.EQUAL, Bytes.toBytes(PTableType.TABLE.getSerializedValue()));
+        tableFilter.setFilterIfMissing(true);
+        SingleColumnValueFilter viewFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_TYPE_BYTES, CompareOp.EQUAL, Bytes.toBytes(PTableType.VIEW.getSerializedValue()));
+        viewFilter.setFilterIfMissing(true);
+        FilterList filter = new FilterList(FilterList.Operator.MUST_PASS_ONE, Arrays.asList(new Filter[] {tableFilter, viewFilter}));
+        return filter;
+    }
+    
+    private void testGetTenantIdExpression(boolean isSalted) throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.setAutoCommit(true);
+        String tableName = "FOO_" + (isSalted ? "SALTED" : "UNSALTED");
+        conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true" + (isSalted ? ",SALT_BUCKETS=3" : ""));
+        conn.createStatement().execute("CREATE SEQUENCE s1");
+        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t1','x')");
+        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t2','y')");
+        
+        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1");
+        Connection tsconn = DriverManager.getConnection(getUrl(), props);
+        tsconn.createStatement().execute("CREATE SEQUENCE s1");
+        Expression e1 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SEQUENCE_FULLNAME);
+        HTableInterface htable1 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
+        assertTenantIds(e1, htable1, new FirstKeyOnlyFilter(), new String[] {"", "t1"} );
+        
+        tsconn.createStatement().execute("CREATE VIEW A.BAR(V1 VARCHAR) AS SELECT * FROM " + tableName);
+        Expression e2 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+        HTableInterface htable2 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+        assertTenantIds(e2, htable2, getUserTableAndViewsFilter(), new String[] {"", "t1"} );
+        
+        Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, tableName);
+        HTableInterface htable3 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
+        assertTenantIds(e3, htable3, new FirstKeyOnlyFilter(), new String[] {"t1", "t2"} );
+        
+        conn.createStatement().execute("CREATE TABLE BAS (k1 VARCHAR PRIMARY KEY)");
+        Expression e4 = PhoenixRuntime.getTenantIdExpression(conn, "BAS");
+        assertNull(e4);
+
+        tsconn.createStatement().execute("CREATE INDEX I1 ON A.BAR(V1)");
+        Expression e5 = PhoenixRuntime.getTenantIdExpression(tsconn, "A.I1");
+        HTableInterface htable5 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
+        assertTenantIds(e5, htable5, new FirstKeyOnlyFilter(), new String[] {"t1"} );
+
+        conn.createStatement().execute("CREATE INDEX I2 ON " + tableName + "(k2)");
+        Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, "I2");
+        HTableInterface htable6 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("I2"));
+        assertTenantIds(e6, htable6, new FirstKeyOnlyFilter(), new String[] {"t1", "t2"} );
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3c675bf/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index b2d7851..02a2776 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -28,6 +28,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
@@ -56,6 +57,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.OrderByExpression;
+import org.apache.phoenix.expression.RowKeyColumnExpression;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.query.QueryConstants;
@@ -66,13 +68,15 @@ import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
 import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnFamily;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.RowKeySchema;
+import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.ValueBitSet;
+import org.apache.phoenix.schema.types.PDataType;
 
 import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableList;
@@ -927,5 +931,38 @@ public class PhoenixRuntime {
         }
         return pColumn;
     }
+    
+    /**
+     * Get expression that may be used to evaluate the tenant ID of a given row in a
+     * multi-tenant table. Both the SYSTEM.CATALOG table and the SYSTEM.SEQUENCE
+     * table are considered multi-tenant.
+     * @param conn open Phoenix connection
+     * @param fullTableName full table name
+     * @return an expression that may be evaluated for a row in the provided table,
+     * or null if the table is not a multi-tenant table
+     * @throws TableNotFoundException (a subclass of SQLException) if the table
+     * name is not found
+     * @throws SQLFeatureNotSupportedException if a multi-tenant local index is supplied
+     */
+    public static Expression getTenantIdExpression(Connection conn, String fullTableName) throws SQLException {
+        PTable table = getTable(conn, fullTableName);
+        // TODO: consider setting MULTI_TENANT = true for SYSTEM.CATALOG and SYSTEM.SEQUENCE
+        if (!SchemaUtil.isMetaTable(table) && !SchemaUtil.isSequenceTable(table) && !table.isMultiTenant()) {
+            return null;
+        }
+        if (table.getIndexType() == IndexType.LOCAL) {
+            /*
+             * With some hackery, we could deduce the tenant ID from a multi-tenant local index,
+             * however it's not clear that we'd want to maintain the same prefixing of the region
+             * start key, as the region boundaries may end up being different on a cluster being
+             * replicated/backed-up to (which is the use case driving the method).
+             */
+            throw new SQLFeatureNotSupportedException();
+        }
+        
+        int pkPosition = table.getBucketNum() == null ? 0 : 1;
+        List<PColumn> pkColumns = table.getPKColumns();
+        return new RowKeyColumnExpression(pkColumns.get(pkPosition), new RowKeyValueAccessor(pkColumns, pkPosition));
+    }
 
 }
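Editorial note: a hypothetical caller-side sketch of the new API (not part of the
commit; the table name, jdbcUrl, and scanner plumbing are assumptions that mirror
the pattern exercised in PhoenixRuntimeIT above):

// Assumes a multi-tenant Phoenix table named MT_TABLE already exists.
Connection conn = DriverManager.getConnection(jdbcUrl);
Expression tenantIdExpr = PhoenixRuntime.getTenantIdExpression(conn, "MT_TABLE");
if (tenantIdExpr != null) { // null means the table is not multi-tenant
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ResultTuple tuple = new ResultTuple();
    for (Result result : scanner) { // a ResultScanner over the table's HBase rows
        tuple.setResult(result);
        if (tenantIdExpr.evaluate(tuple, ptr)) {
            String tenantId = (String) PVarchar.INSTANCE.toObject(ptr);
            // ... group or route the row by tenantId ...
        }
    }
}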

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3c675bf/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 9ab0692..afd61ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -376,6 +376,10 @@ public class SchemaUtil {
         return Bytes.compareTo(tableName, PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES) == 0;
     }
 
+    public static boolean isSequenceTable(PTable table) {
+        return PhoenixDatabaseMetaData.SEQUENCE_FULLNAME.equals(table.getName().getString());
+    }
+
     public static boolean isMetaTable(PTable table) {
         return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(table.getSchemaName().getString()) && PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE.equals(table.getTableName().getString());
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3c675bf/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index f81c3a9..9947440 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -821,14 +821,28 @@ public abstract class BaseTest {
         }
     }
     
-    private static void deletePriorSequences(long ts, Connection conn) throws Exception {
+    private static void deletePriorSequences(long ts, Connection globalConn) throws Exception {
         // TODO: drop tenant-specific sequences too
-        ResultSet rs = conn.createStatement().executeQuery("SELECT " 
+        ResultSet rs = globalConn.createStatement().executeQuery("SELECT " 
+                + PhoenixDatabaseMetaData.TENANT_ID + ","
                 + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + "," 
                 + PhoenixDatabaseMetaData.SEQUENCE_NAME 
                 + " FROM " + PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_ESCAPED);
+        String lastTenantId = null;
+        Connection conn = globalConn;
         while (rs.next()) {
-            conn.createStatement().execute("DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(1), rs.getString(2)));
+            String tenantId = rs.getString(1);
+            if (tenantId != null && !tenantId.equals(lastTenantId))  {
+                if (lastTenantId != null) {
+                    conn.close();
+                }
+                // Open tenant-specific connection when we find a new one
+                Properties props = new Properties(globalConn.getClientInfo());
+                props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+                conn = DriverManager.getConnection(url, props);
+                lastTenantId = tenantId;
+            }
+            conn.createStatement().execute("DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
         }
     }
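Editorial note on the loop above: a new tenant-specific connection is opened
whenever the tenant id changes from the previous row (cheapest when rows arrive
grouped by tenant, as they do when TENANT_ID leads the primary key), but the last
tenant-specific connection is never closed. A hypothetical tidy-up, placed after
the while loop, would be:

if (conn != globalConn) {
    conn.close(); // close the final tenant-specific connection, if any
}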
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3c675bf/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java
index a642e80..c1f3244 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.util;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -27,6 +28,7 @@ import static org.junit.Assert.fail;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
+import java.sql.SQLFeatureNotSupportedException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
@@ -34,7 +36,10 @@ import java.util.Properties;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
+import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.types.PDataType;
 import org.junit.Test;
 
@@ -155,4 +160,57 @@ public class PhoenixRuntimeTest extends BaseConnectionlessQueryTest {
             fail("Failed sql: " + sb.toString() + ExceptionUtils.getStackTrace(e));
         }
     }
+    
+    @Test
+    public void testGetTenantIdExpression() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        Expression e1 = PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
+        assertNull(e1);
+        Expression e2 = PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+        assertNotNull(e2);
+
+        Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SEQUENCE_FULLNAME);
+        assertNotNull(e3);
+        
+        conn.createStatement().execute("CREATE TABLE FOO (k VARCHAR PRIMARY KEY)");
+        Expression e4 = PhoenixRuntime.getTenantIdExpression(conn, "FOO");
+        assertNull(e4);
+        
+        conn.createStatement().execute("CREATE TABLE A.BAR (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true");
+        Expression e5 = PhoenixRuntime.getTenantIdExpression(conn, "A.BAR");
+        assertNotNull(e5);
+
+        conn.createStatement().execute("CREATE INDEX I1 ON A.BAR (K2)");
+        Expression e5A = PhoenixRuntime.getTenantIdExpression(conn, "A.I1");
+        assertNotNull(e5A);
+        
+        conn.createStatement().execute("CREATE TABLE BAS (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true, SALT_BUCKETS=3");
+        Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, "BAS");
+        assertNotNull(e6);
+        
+        conn.createStatement().execute("CREATE INDEX I2 ON BAS (K2)");
+        Expression e6A = PhoenixRuntime.getTenantIdExpression(conn, "I2");
+        assertNotNull(e6A);
+        
+        try {
+            PhoenixRuntime.getTenantIdExpression(conn, "NOT.ATABLE");
+            fail();
+        } catch (TableNotFoundException e) {
+            // Expected
+        }
+        
+        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1");
+        Connection tsconn = DriverManager.getConnection(getUrl(), props);
+        tsconn.createStatement().execute("CREATE VIEW V(V1 VARCHAR) AS SELECT * FROM BAS");
+        Expression e7 = PhoenixRuntime.getTenantIdExpression(tsconn, "V");
+        assertNotNull(e7);
+        tsconn.createStatement().execute("CREATE LOCAL INDEX I3 ON V (V1)");
+        try {
+            PhoenixRuntime.getTenantIdExpression(tsconn, "I3");
+            fail();
+        } catch (SQLFeatureNotSupportedException e) {
+            // Expected
+        }
+    }
 }

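A quick orientation for readers of the test above: getTenantIdExpression returns null for tables that are not multi-tenant, and an Expression over the row key otherwise. The following is a minimal client-side sketch, assuming a connection URL of jdbc:phoenix:localhost and the multi-tenant table A.BAR from the test (both illustrative, not part of the commit):

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.util.PhoenixRuntime;

public class TenantIdExpressionDemo {
    public static void main(String[] args) throws Exception {
        // jdbc:phoenix:localhost is an assumed URL; adjust for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Per the test above: null for plain tables, non-null for
            // MULTI_TENANT tables, their indexes, and tenant-specific views.
            Expression e = PhoenixRuntime.getTenantIdExpression(conn, "A.BAR");
            System.out.println(e == null ? "A.BAR is not multi-tenant"
                                         : "tenant id expression: " + e);
        }
    }
}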

[03/50] [abbrv] phoenix git commit: PHOENIX-1596 Turning tracing on results in region server crash

Posted by ma...@apache.org.
PHOENIX-1596 Turning tracing on results in region server crash


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a1baf2ac
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a1baf2ac
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a1baf2ac

Branch: refs/heads/calcite
Commit: a1baf2aca955551099f1e21086cfca752776cce0
Parents: 1c58f44
Author: Samarth <sa...@salesforce.com>
Authored: Fri Jan 30 15:08:44 2015 -0800
Committer: Samarth <sa...@salesforce.com>
Committed: Fri Jan 30 15:08:44 2015 -0800

----------------------------------------------------------------------
 .../coprocessor/BaseScannerRegionObserver.java  |  31 ++-
 .../org/apache/phoenix/hbase/index/Indexer.java | 225 +++++++++----------
 .../org/apache/phoenix/trace/util/Tracing.java  |  67 +-----
 3 files changed, 133 insertions(+), 190 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a1baf2ac/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 1647e5c..69cdcb6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -47,11 +47,11 @@ import org.apache.phoenix.schema.KeyValueSchema;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
-import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.cloudera.htrace.Span;
+import org.cloudera.htrace.Trace;
 
 import com.google.common.collect.ImmutableList;
 
@@ -159,24 +159,39 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                 return s;
             }
             boolean success =false;
-            // turn on tracing, if its enabled
-            final Span child = Tracing.childOnServer(scan, rawConf, SCANNER_OPENED_TRACE_INFO);
+            // Save the current span. When done with the child span, reset the span back to
+            // what it was. Otherwise, the thread local storing the current span is never
+            // reset back to null, causing catastrophic infinite loops
+            // and region server crashes. See https://issues.apache.org/jira/browse/PHOENIX-1596
+            // TraceScope can't be used here because closing the scope will end up calling 
+            // currentSpan.stop() and that should happen only when we are closing the scanner.
+            final Span savedSpan = Trace.currentSpan();
+            final Span child = Trace.startSpan(SCANNER_OPENED_TRACE_INFO, savedSpan).getSpan();
             try {
                 RegionScanner scanner = doPostScannerOpen(c, scan, s);
                 scanner = new DelegateRegionScanner(scanner) {
+                    // This isn't very obvious but close() could be called in a thread
+                    // that is different from the thread that created the scanner.
                     @Override
                     public void close() throws IOException {
-                        if (child != null) {
-                            child.stop();
+                        try {
+                            delegate.close();
+                        } finally {
+                            if (child != null) {
+                                child.stop();
+                            }
                         }
-                        delegate.close();
                     }
                 };
                 success = true;
                 return scanner;
             } finally {
-                if (!success && child != null) {
-                    child.stop();
+                try {
+                    if (!success && child != null) {
+                        child.stop();
+                    }
+                } finally {
+                    Trace.continueSpan(savedSpan);
                 }
             }
         } catch (Throwable t) {

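The essential shape of the fix above: capture the current span, start the child against it, and unconditionally restore the captured span in a finally block so the htrace thread local never leaks. Below is a minimal sketch of that shape using only the Trace/Span calls visible in this diff; runScannerWork() is a hypothetical placeholder, and unlike the real fix (which defers child.stop() to scanner close) this sketch stops the child as soon as the work completes:

import org.cloudera.htrace.Span;
import org.cloudera.htrace.Trace;

public class SpanSaveRestoreSketch {
    // runScannerWork stands in for the real coprocessor work.
    static void traceChild(Runnable runScannerWork) {
        final Span saved = Trace.currentSpan();
        final Span child = Trace.startSpan("scanner opened", saved).getSpan();
        try {
            runScannerWork.run();
        } finally {
            try {
                if (child != null) {
                    child.stop();          // end the child span
                }
            } finally {
                Trace.continueSpan(saved); // restore the saved thread-local span
            }
        }
    }
}
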
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a1baf2ac/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index b841410..a4fc96b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -210,79 +210,76 @@ public class Indexer extends BaseRegionObserver {
   }
 
   public void preBatchMutateWithExceptions(ObserverContext<RegionCoprocessorEnvironment> c,
-      MiniBatchOperationInProgress<Mutation> miniBatchOp) throws Throwable {
+          MiniBatchOperationInProgress<Mutation> miniBatchOp) throws Throwable {
 
-    // first group all the updates for a single row into a single update to be processed
-    Map<ImmutableBytesPtr, MultiMutation> mutations =
-        new HashMap<ImmutableBytesPtr, MultiMutation>();
+      // first group all the updates for a single row into a single update to be processed
+      Map<ImmutableBytesPtr, MultiMutation> mutations =
+              new HashMap<ImmutableBytesPtr, MultiMutation>();
 
-    Durability defaultDurability = Durability.SYNC_WAL;
-    if(c.getEnvironment().getRegion() != null) {
-    	defaultDurability = c.getEnvironment().getRegion().getTableDesc().getDurability();
-    	defaultDurability = (defaultDurability == Durability.USE_DEFAULT) ? 
-    			Durability.SYNC_WAL : defaultDurability;
-    }
-    Durability durability = Durability.SKIP_WAL;
-    for (int i = 0; i < miniBatchOp.size(); i++) {
-      Mutation m = miniBatchOp.getOperation(i);
-      // skip this mutation if we aren't enabling indexing
-      // unfortunately, we really should ask if the raw mutation (rather than the combined mutation)
-      // should be indexed, which means we need to expose another method on the builder. Such is the
-      // way optimizations go, though.
-      if (!this.builder.isEnabled(m)) {
-        continue;
-      }
-      
-      Durability effectiveDurablity = (m.getDurability() == Durability.USE_DEFAULT) ? 
-    		  defaultDurability : m.getDurability();
-      if (effectiveDurablity.ordinal() > durability.ordinal()) {
-        durability = effectiveDurablity;
+      Durability defaultDurability = Durability.SYNC_WAL;
+      if(c.getEnvironment().getRegion() != null) {
+          defaultDurability = c.getEnvironment().getRegion().getTableDesc().getDurability();
+          defaultDurability = (defaultDurability == Durability.USE_DEFAULT) ? 
+                  Durability.SYNC_WAL : defaultDurability;
       }
+      Durability durability = Durability.SKIP_WAL;
+      for (int i = 0; i < miniBatchOp.size(); i++) {
+          Mutation m = miniBatchOp.getOperation(i);
+          // skip this mutation if we aren't enabling indexing
+          // unfortunately, we really should ask if the raw mutation (rather than the combined mutation)
+          // should be indexed, which means we need to expose another method on the builder. Such is the
+          // way optimizations go, though.
+          if (!this.builder.isEnabled(m)) {
+              continue;
+          }
+
+          Durability effectiveDurablity = (m.getDurability() == Durability.USE_DEFAULT) ? 
+                  defaultDurability : m.getDurability();
+          if (effectiveDurablity.ordinal() > durability.ordinal()) {
+              durability = effectiveDurablity;
+          }
 
-      // add the mutation to the batch set
-      ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
-      MultiMutation stored = mutations.get(row);
-      // we haven't seen this row before, so add it
-      if (stored == null) {
-        stored = new MultiMutation(row);
-        mutations.put(row, stored);
+          // add the mutation to the batch set
+          ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
+          MultiMutation stored = mutations.get(row);
+          // we haven't seen this row before, so add it
+          if (stored == null) {
+              stored = new MultiMutation(row);
+              mutations.put(row, stored);
+          }
+          stored.addAll(m);
       }
-      stored.addAll(m);
-    }
-    
-    // early exit if it turns out we don't have any edits
-    if (mutations.entrySet().size() == 0) {
-      return;
-    }
 
-    // dump all the index updates into a single WAL. They will get combined in the end anyways, so
-    // don't worry which one we get
-    WALEdit edit = miniBatchOp.getWalEdit(0);
-    if (edit == null) {
-        edit = new WALEdit();
-        miniBatchOp.setWalEdit(0, edit);
-    }
+      // early exit if it turns out we don't have any edits
+      if (mutations.entrySet().size() == 0) {
+          return;
+      }
 
-        // get the current span, or just use a null-span to avoid a bunch of if statements
-    TraceScope scope = Trace.startSpan("Starting to build index updates");
-        Span current = scope.getSpan();
-        if (current == null) {
-            current = NullSpan.INSTANCE;
-        }
+      // dump all the index updates into a single WAL. They will get combined in the end anyways, so
+      // don't worry which one we get
+      WALEdit edit = miniBatchOp.getWalEdit(0);
+      if (edit == null) {
+          edit = new WALEdit();
+          miniBatchOp.setWalEdit(0, edit);
+      }
 
-    // get the index updates for all elements in this batch
-    Collection<Pair<Mutation, byte[]>> indexUpdates =
-        this.builder.getIndexUpdate(miniBatchOp, mutations.values());
+      // get the current span, or just use a null-span to avoid a bunch of if statements
+      try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
+          Span current = scope.getSpan();
+          if (current == null) {
+              current = NullSpan.INSTANCE;
+          }
 
-        current.addTimelineAnnotation("Built index updates, doing preStep");
-        TracingUtils.addAnnotation(current, "index update count", indexUpdates.size());
+          // get the index updates for all elements in this batch
+          Collection<Pair<Mutation, byte[]>> indexUpdates =
+                  this.builder.getIndexUpdate(miniBatchOp, mutations.values());
 
-    // write them, either to WAL or the index tables
-    doPre(indexUpdates, edit, durability);
+          current.addTimelineAnnotation("Built index updates, doing preStep");
+          TracingUtils.addAnnotation(current, "index update count", indexUpdates.size());
 
-        // close the span
-        current.stop();
-        scope.close();
+          // write them, either to WAL or the index tables
+          doPre(indexUpdates, edit, durability);
+      }
   }
 
   private class MultiMutation extends Mutation {
@@ -416,65 +413,59 @@ public class Indexer extends BaseRegionObserver {
   }
 
   private void doPostWithExceptions(WALEdit edit, Mutation m, final Durability durability)
-      throws Exception {
-    //short circuit, if we don't need to do any work
-    if (durability == Durability.SKIP_WAL || !this.builder.isEnabled(m)) {
-      // already did the index update in prePut, so we are done
-      return;
-    }
-
-        // get the current span, or just use a null-span to avoid a bunch of if statements
-    TraceScope scope = Trace.startSpan("Completing index writes");
-        Span current = scope.getSpan();
-        if (current == null) {
-            current = NullSpan.INSTANCE;
-        }
-
-    // there is a little bit of excess here: we iterate all the non-indexed kvs for this check first
-    // and then do it again later when getting out the index updates. This should be pretty minor
-    // though, compared to the rest of the runtime
-    IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
-
-    /*
-     * early exit - we have nothing to write, so we don't need to do anything else. NOTE: we don't
-     * release the WAL Rolling lock (INDEX_UPDATE_LOCK) since we never take it in doPre if there are
-     * no index updates.
-     */
-    if (ikv == null) {
-            current.stop();
-            scope.close();
-      return;
-    }
-
-    /*
-     * only write the update if we haven't already seen this batch. We only want to write the batch
-     * once (this hook gets called with the same WALEdit for each Put/Delete in a batch, which can
-     * lead to writing all the index updates for each Put/Delete).
-     */
-    if (!ikv.getBatchFinished()) {
-      Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(edit);
-
-      // the WAL edit is kept in memory and we already specified the factory when we created the
-      // references originally - therefore, we just pass in a null factory here and use the ones
-      // already specified on each reference
-      try {
-                current.addTimelineAnnotation("Actually doing index update for first time");
-          writer.writeAndKillYourselfOnFailure(indexUpdates);
-      } finally {
-        // With a custom kill policy, we may throw instead of kill the server.
-        // Without doing this in a finally block (at least with the mini cluster),
-        // the region server never goes down.
+          throws Exception {
+      //short circuit, if we don't need to do any work
+      if (durability == Durability.SKIP_WAL || !this.builder.isEnabled(m)) {
+          // already did the index update in prePut, so we are done
+          return;
+      }
 
-        // mark the batch as having been written. In the single-update case, this never gets checked
-        // again, but in the batch case, we will check it again (see above).
-        ikv.markBatchFinished();
+      // get the current span, or just use a null-span to avoid a bunch of if statements
+      try (TraceScope scope = Trace.startSpan("Completing index writes")) {
+          Span current = scope.getSpan();
+          if (current == null) {
+              current = NullSpan.INSTANCE;
+          }
 
-                // finish the span
+          // there is a little bit of excess here: we iterate all the non-indexed kvs for this check first
+          // and then do it again later when getting out the index updates. This should be pretty minor
+          // though, compared to the rest of the runtime
+          IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
+
+          /*
+           * early exit - we have nothing to write, so we don't need to do anything else. NOTE: we don't
+           * release the WAL Rolling lock (INDEX_UPDATE_LOCK) since we never take it in doPre if there are
+           * no index updates.
+           */
+          if (ikv == null) {
+              return;
+          }
 
-                current.stop();
-                scope.close();
+          /*
+           * only write the update if we haven't already seen this batch. We only want to write the batch
+           * once (this hook gets called with the same WALEdit for each Put/Delete in a batch, which can
+           * lead to writing all the index updates for each Put/Delete).
+           */
+          if (!ikv.getBatchFinished()) {
+              Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(edit);
+
+              // the WAL edit is kept in memory and we already specified the factory when we created the
+              // references originally - therefore, we just pass in a null factory here and use the ones
+              // already specified on each reference
+              try {
+                  current.addTimelineAnnotation("Actually doing index update for first time");
+                  writer.writeAndKillYourselfOnFailure(indexUpdates);
+              } finally {
+                  // With a custom kill policy, we may throw instead of kill the server.
+                  // Without doing this in a finally block (at least with the mini cluster),
+                  // the region server never goes down.
+
+                  // mark the batch as having been written. In the single-update case, this never gets checked
+                  // again, but in the batch case, we will check it again (see above).
+                  ikv.markBatchFinished();
+              }
+          }
       }
-    }
   }
 
   /**

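The Indexer changes above replace manual scope.close() calls with try-with-resources, which guarantees the TraceScope is closed on every exit path, including the early return and any exception. A condensed sketch of that shape; NullSpan's package is assumed from its use elsewhere in this commit, and doTracedWork() is a placeholder:

import org.apache.phoenix.trace.util.NullSpan;
import org.cloudera.htrace.Span;
import org.cloudera.htrace.Trace;
import org.cloudera.htrace.TraceScope;

public class TraceScopeSketch {
    // doTracedWork stands in for the real index-building work.
    static void tracedIndexWork(Runnable doTracedWork) throws Exception {
        try (TraceScope scope = Trace.startSpan("Completing index writes")) {
            Span current = scope.getSpan();
            if (current == null) {
                current = NullSpan.INSTANCE; // avoids null checks at every annotation
            }
            current.addTimelineAnnotation("starting traced work");
            doTracedWork.run();
        } // scope.close() runs here on every exit path
    }
}
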
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a1baf2ac/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index b093b9c..7e1df72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -28,9 +28,6 @@ import javax.annotation.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.OperationWithAttributes;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.call.CallWrapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -40,7 +37,6 @@ import org.apache.phoenix.trace.TraceMetricSource;
 import org.cloudera.htrace.Sampler;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.Trace;
-import org.cloudera.htrace.TraceInfo;
 import org.cloudera.htrace.TraceScope;
 import org.cloudera.htrace.Tracer;
 import org.cloudera.htrace.impl.ProbabilitySampler;
@@ -62,15 +58,10 @@ public class Tracing {
     // Constants for tracing across the wire
     public static final String TRACE_ID_ATTRIBUTE_KEY = "phoenix.trace.traceid";
     public static final String SPAN_ID_ATTRIBUTE_KEY = "phoenix.trace.spanid";
-    private static final String START_SPAN_MESSAGE = "Span received on server. Starting child";
-
+    
     // Constants for passing into the metrics system
     private static final String TRACE_METRIC_PREFIX = "phoenix.trace.instance";
-    /**
-     * We always trace on the server, assuming the client has requested tracing on the request
-     */
-    private static Sampler<?> SERVER_TRACE_LEVEL = Sampler.ALWAYS;
-
+    
     /**
      * Manage the types of frequencies that we support. By default, we never turn on tracing.
      */
@@ -169,60 +160,6 @@ public class Tracing {
                 + SEPARATOR + span.getSpanId();
     }
 
-    /**
-     * Check to see if tracing is current enabled. The trace for this thread is returned, if we are
-     * already tracing. Otherwise, checks to see if mutation has tracing enabled, and if so, starts
-     * a new span with the {@link Mutation}'s specified span as its parent.
-     * <p>
-     * This should only be run on the server-side as we base tracing on if we are currently tracing
-     * (started higher in the call-stack) or if the {@link Mutation} has the tracing attributes
-     * defined. As such, we would expect to continue the trace on the server-side based on the
-     * original sampling parameters.
-     * @param scan {@link Mutation} to check
-     * @param conf {@link Configuration} to read for the current sampler
-     * @param description description of the child span to start
-     * @return <tt>null</tt> if tracing is not enabled, or the parent {@link Span}
-     */
-    public static Span childOnServer(OperationWithAttributes scan, Configuration conf,
-            String description) {
-        // check to see if we are currently tracing. Generally, this will only work when we go to
-        // 0.96. CPs should always be setting up and tearing down their own tracing
-        Span current = Trace.currentSpan();
-        if (current == null) {
-            // its not tracing yet, but maybe it should be.
-            current = enable(scan, conf, description);
-        } else {
-            current = Trace.startSpan(description, current).getSpan();
-        }
-        return current;
-    }
-
-    /**
-     * Check to see if this mutation has tracing enabled, and if so, get a new span with the
-     * {@link Mutation}'s specified span as its parent.
-     * @param map mutation to check
-     * @param conf {@link Configuration} to check for the {@link Sampler} configuration, if we are
-     *            tracing
-     * @param description on the child to start
-     * @return a child span of the mutation, or <tt>null</tt> if tracing is not enabled.
-     */
-    @SuppressWarnings("unchecked")
-    private static Span enable(OperationWithAttributes map, Configuration conf, String description) {
-        byte[] traceid = map.getAttribute(TRACE_ID_ATTRIBUTE_KEY);
-        if (traceid == null) {
-            return NullSpan.INSTANCE;
-        }
-        byte[] spanid = map.getAttribute(SPAN_ID_ATTRIBUTE_KEY);
-        if (spanid == null) {
-            LOG.error("TraceID set to " + Bytes.toLong(traceid) + ", but span id was not set!");
-            return NullSpan.INSTANCE;
-        }
-        Sampler<?> sampler = SERVER_TRACE_LEVEL;
-        TraceInfo parent = new TraceInfo(Bytes.toLong(traceid), Bytes.toLong(spanid));
-        return Trace.startSpan(START_SPAN_MESSAGE + ": " + description,
-            (Sampler<TraceInfo>) sampler, parent).getSpan();
-    }
-
     public static Span child(Span s, String d) {
         if (s == null) {
             return NullSpan.INSTANCE;


[36/50] [abbrv] phoenix git commit: PHOENIX-1672 RegionScanner.nextRaw contract not implemented correctly

Posted by ma...@apache.org.
PHOENIX-1672 RegionScanner.nextRaw contract not implemented correctly


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3d50147f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3d50147f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3d50147f

Branch: refs/heads/calcite
Commit: 3d50147f213dd3f830b039159fde68eae10ae233
Parents: c633151
Author: Andrew Purtell <ap...@apache.org>
Authored: Sat Feb 21 20:34:08 2015 -0800
Committer: Andrew Purtell <ap...@apache.org>
Committed: Sat Feb 21 20:34:08 2015 -0800

----------------------------------------------------------------------
 .../GroupedAggregateRegionObserver.java         |  96 ++++----
 .../UngroupedAggregateRegionObserver.java       | 246 ++++++++++---------
 .../phoenix/index/PhoenixIndexBuilder.java      |  18 +-
 .../iterate/RegionScannerResultIterator.java    |  36 +--
 4 files changed, 216 insertions(+), 180 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d50147f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 8b59b85..0984b06 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -375,7 +375,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
      * @param limit TODO
      */
     private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
-            final RegionScanner s, final List<Expression> expressions,
+            final RegionScanner scanner, final List<Expression> expressions,
             final ServerAggregators aggregators, long limit) throws IOException {
         if (logger.isDebugEnabled()) {
             logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
@@ -410,28 +410,30 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             HRegion region = c.getEnvironment().getRegion();
             region.startRegionOperation();
             try {
-                do {
-                    List<Cell> results = new ArrayList<Cell>();
-                    // Results are potentially returned even when the return
-                    // value of s.next is false
-                    // since this is an indication of whether or not there are
-                    // more values after the
-                    // ones returned
-                    hasMore = s.nextRaw(results);
-                    if (!results.isEmpty()) {
-                        result.setKeyValues(results);
-                        ImmutableBytesWritable key =
+                synchronized (scanner) {
+                    do {
+                        List<Cell> results = new ArrayList<Cell>();
+                        // Results are potentially returned even when the return
+                        // value of scanner.next is false
+                        // since this is an indication of whether or not there are
+                        // more values after the
+                        // ones returned
+                        hasMore = scanner.nextRaw(results);
+                        if (!results.isEmpty()) {
+                            result.setKeyValues(results);
+                            ImmutableBytesWritable key =
                                 TupleUtil.getConcatenatedValue(result, expressions);
-                        Aggregator[] rowAggregators = groupByCache.cache(key);
-                        // Aggregate values here
-                        aggregators.aggregate(rowAggregators, result);
-                    }
-                } while (hasMore && groupByCache.size() < limit);
+                            Aggregator[] rowAggregators = groupByCache.cache(key);
+                            // Aggregate values here
+                            aggregators.aggregate(rowAggregators, result);
+                        }
+                    } while (hasMore && groupByCache.size() < limit);
+                }
             } finally {
                 region.closeRegionOperation();
             }
 
-            RegionScanner regionScanner = groupByCache.getScanner(s);
+            RegionScanner regionScanner = groupByCache.getScanner(scanner);
 
             // Do not sort here, but sort back on the client instead
             // The reason is that if the scan ever extends beyond a region
@@ -453,7 +455,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
      * @throws IOException 
      */
     private RegionScanner scanOrdered(final ObserverContext<RegionCoprocessorEnvironment> c,
-            final Scan scan, final RegionScanner s, final List<Expression> expressions,
+            final Scan scan, final RegionScanner scanner, final List<Expression> expressions,
             final ServerAggregators aggregators, final long limit) throws IOException {
 
         if (logger.isDebugEnabled()) {
@@ -466,12 +468,12 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
 
             @Override
             public HRegionInfo getRegionInfo() {
-                return s.getRegionInfo();
+                return scanner.getRegionInfo();
             }
 
             @Override
             public void close() throws IOException {
-                s.close();
+                scanner.close();
             }
 
             @Override
@@ -488,32 +490,36 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 HRegion region = c.getEnvironment().getRegion();
                 region.startRegionOperation();
                 try {
-                    do {
-                        List<Cell> kvs = new ArrayList<Cell>();
-                        // Results are potentially returned even when the return
-                        // value of s.next is false
-                        // since this is an indication of whether or not there
-                        // are more values after the
-                        // ones returned
-                        hasMore = s.nextRaw(kvs);
-                        if (!kvs.isEmpty()) {
-                            result.setKeyValues(kvs);
-                            key = TupleUtil.getConcatenatedValue(result, expressions);
-                            aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
-                            if (!aggBoundary) {
-                                aggregators.aggregate(rowAggregators, result);
-                                if (logger.isDebugEnabled()) {
-                                    logger.debug(LogUtil.addCustomAnnotations("Row passed filters: " + kvs
+                    synchronized (scanner) {
+                        do {
+                            List<Cell> kvs = new ArrayList<Cell>();
+                            // Results are potentially returned even when the return
+                            // value of scanner.next is false
+                            // since this is an indication of whether or not there
+                            // are more values after the
+                            // ones returned
+                            hasMore = scanner.nextRaw(kvs);
+                            if (!kvs.isEmpty()) {
+                                result.setKeyValues(kvs);
+                                key = TupleUtil.getConcatenatedValue(result, expressions);
+                                aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
+                                if (!aggBoundary) {
+                                    aggregators.aggregate(rowAggregators, result);
+                                    if (logger.isDebugEnabled()) {
+                                        logger.debug(LogUtil.addCustomAnnotations(
+                                            "Row passed filters: " + kvs
                                             + ", aggregated values: "
-                                            + Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan)));
+                                            + Arrays.asList(rowAggregators),
+                                            ScanUtil.getCustomAnnotations(scan)));
+                                    }
+                                    currentKey = key;
                                 }
-                                currentKey = key;
                             }
-                        }
-                        atLimit = rowCount + countOffset >= limit;
-                        // Do rowCount + 1 b/c we don't have to wait for a complete
-                        // row in the case of a DISTINCT with a LIMIT
-                    } while (hasMore && !aggBoundary && !atLimit);
+                            atLimit = rowCount + countOffset >= limit;
+                            // Do rowCount + 1 b/c we don't have to wait for a complete
+                            // row in the case of a DISTINCT with a LIMIT
+                        } while (hasMore && !aggBoundary && !atLimit);
+                    }
                 } finally {
                     region.closeRegionOperation();
                 }
@@ -555,7 +561,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             
             @Override
             public long getMaxResultSize() {
-                return s.getMaxResultSize();
+                return scanner.getMaxResultSize();
             }
         };
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d50147f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index a3b2faa..71c4dc6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -253,128 +253,152 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         final RegionScanner innerScanner = theScanner;
         region.startRegionOperation();
         try {
-            do {
-                List<Cell> results = new ArrayList<Cell>();
-                // Results are potentially returned even when the return value of s.next is false
-                // since this is an indication of whether or not there are more values after the
-                // ones returned
-                hasMore = innerScanner.nextRaw(results);
-                if(stats != null) {
-                    stats.collectStatistics(results);
-                }
-                
-                if (!results.isEmpty()) {
-                    rowCount++;
-                    result.setKeyValues(results);
-                    try {
-                        if (buildLocalIndex) {
-                            for (IndexMaintainer maintainer : indexMaintainers) {
-                                if (!results.isEmpty()) {
-                                    result.getKey(ptr);
-                                    ValueGetter valueGetter = maintainer.createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr),results);
-                                    Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, c.getEnvironment().getRegion().getStartKey(), c.getEnvironment().getRegion().getEndKey());
-                                    indexMutations.add(put);
+            synchronized (innerScanner) {
+                do {
+                    List<Cell> results = new ArrayList<Cell>();
+                    // Results are potentially returned even when the return value of innerScanner.next is false
+                    // since this is an indication of whether or not there are more values after the
+                    // ones returned
+                    hasMore = innerScanner.nextRaw(results);
+                    if (stats != null) {
+                        stats.collectStatistics(results);
+                    }
+                    if (!results.isEmpty()) {
+                        rowCount++;
+                        result.setKeyValues(results);
+                        try {
+                            if (buildLocalIndex) {
+                                for (IndexMaintainer maintainer : indexMaintainers) {
+                                    if (!results.isEmpty()) {
+                                        result.getKey(ptr);
+                                        ValueGetter valueGetter =
+                                            maintainer.createGetterFromKeyValues(
+                                                ImmutableBytesPtr.copyBytesIfNecessary(ptr),
+                                                results);
+                                        Put put = maintainer.buildUpdateMutation(kvBuilder,
+                                            valueGetter, ptr, ts,
+                                            c.getEnvironment().getRegion().getStartKey(),
+                                            c.getEnvironment().getRegion().getEndKey());
+                                        indexMutations.add(put);
+                                    }
                                 }
-                            }
-                            result.setKeyValues(results);
-                        } else if (isDelete) {
-                            // FIXME: the version of the Delete constructor without the lock args was introduced
-                            // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
-                            // of the client.
-                            Cell firstKV = results.get(0);
-                            Delete delete = new Delete(firstKV.getRowArray(), firstKV.getRowOffset(), 
-                                firstKV.getRowLength(),ts);
-                            mutations.add(delete);
-                        } else if (isUpsert) {
-                            Arrays.fill(values, null);
-                            int i = 0;
-                            List<PColumn> projectedColumns = projectedTable.getColumns();
-                            for (; i < projectedTable.getPKColumns().size(); i++) {
-                                Expression expression = selectExpressions.get(i);
-                                if (expression.evaluate(result, ptr)) {
-                                    values[i] = ptr.copyBytes();
-                                    // If SortOrder from expression in SELECT doesn't match the
-                                    // column being projected into then invert the bits.
-                                    if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
-                                        SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
+                                result.setKeyValues(results);
+                            } else if (isDelete) {
+                                // FIXME: the version of the Delete constructor without the lock
+                                // args was introduced in 0.94.4, thus if we try to use it here
+                                // we can no longer use the 0.94.2 version of the client.
+                              Cell firstKV = results.get(0);
+                              Delete delete = new Delete(firstKV.getRowArray(),
+                                  firstKV.getRowOffset(), firstKV.getRowLength(),ts);
+                              mutations.add(delete);
+                            } else if (isUpsert) {
+                                Arrays.fill(values, null);
+                                int i = 0;
+                                List<PColumn> projectedColumns = projectedTable.getColumns();
+                                for (; i < projectedTable.getPKColumns().size(); i++) {
+                                    Expression expression = selectExpressions.get(i);
+                                    if (expression.evaluate(result, ptr)) {
+                                        values[i] = ptr.copyBytes();
+                                        // If SortOrder from expression in SELECT doesn't match the
+                                        // column being projected into then invert the bits.
+                                        if (expression.getSortOrder() !=
+                                            projectedColumns.get(i).getSortOrder()) {
+                                            SortOrder.invert(values[i], 0, values[i], 0,
+                                                values[i].length);
+                                        }
                                     }
                                 }
-                            }
-                            projectedTable.newKey(ptr, values);
-                            PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
-                            for (; i < projectedColumns.size(); i++) {
-                                Expression expression = selectExpressions.get(i);
-                                if (expression.evaluate(result, ptr)) {
-                                    PColumn column = projectedColumns.get(i);
-                                    Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
-                                    // We are guaranteed that the two column will have the same type.
-                                    if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
-                                            expression.getMaxLength(),  expression.getScale(), 
-                                            column.getMaxLength(), column.getScale())) {
-                                        throw new ValueTypeIncompatibleException(column.getDataType(),
-                                                column.getMaxLength(), column.getScale());
+                                projectedTable.newKey(ptr, values);
+                                PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
+                                for (; i < projectedColumns.size(); i++) {
+                                    Expression expression = selectExpressions.get(i);
+                                    if (expression.evaluate(result, ptr)) {
+                                        PColumn column = projectedColumns.get(i);
+                                        Object value = expression.getDataType()
+                                            .toObject(ptr, column.getSortOrder());
+                                        // We are guaranteed that the two column will have the
+                                        // same type.
+                                        if (!column.getDataType().isSizeCompatible(ptr, value,
+                                                column.getDataType(), expression.getMaxLength(),
+                                                expression.getScale(), column.getMaxLength(),
+                                                column.getScale())) {
+                                            throw new ValueTypeIncompatibleException(
+                                                column.getDataType(), column.getMaxLength(),
+                                                column.getScale());
+                                        }
+                                        column.getDataType().coerceBytes(ptr, value,
+                                            expression.getDataType(), expression.getMaxLength(),
+                                            expression.getScale(), expression.getSortOrder(), 
+                                            column.getMaxLength(), column.getScale(),
+                                            column.getSortOrder());
+                                        byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
+                                        row.setValue(column, bytes);
                                     }
-                                    column.getDataType().coerceBytes(ptr, value, expression.getDataType(),
-                                            expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), 
-                                            column.getMaxLength(), column.getScale(), column.getSortOrder());
-                                    byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
-                                    row.setValue(column, bytes);
+                                }
+                                for (Mutation mutation : row.toRowMutations()) {
+                                    mutations.add(mutation);
+                                }
+                                for (i = 0; i < selectExpressions.size(); i++) {
+                                    selectExpressions.get(i).reset();
+                                }
+                            } else if (deleteCF != null && deleteCQ != null) {
+                                // No need to search for delete column, since we project only it
+                                // if no empty key value is being set
+                                if (emptyCF == null ||
+                                        result.getValue(deleteCF, deleteCQ) != null) {
+                                    Delete delete = new Delete(results.get(0).getRowArray(),
+                                        results.get(0).getRowOffset(),
+                                        results.get(0).getRowLength());
+                                    delete.deleteColumns(deleteCF,  deleteCQ, ts);
+                                    mutations.add(delete);
                                 }
                             }
-                            for (Mutation mutation : row.toRowMutations()) {
-                                mutations.add(mutation);
-                            }
-                            for (i = 0; i < selectExpressions.size(); i++) {
-                                selectExpressions.get(i).reset();
+                            if (emptyCF != null) {
+                                /*
+                                 * If we've specified an emptyCF, then we need to insert an empty
+                                 * key value "retroactively" for any key value that is visible at
+                                 * the timestamp that the DDL was issued. Key values that are not
+                                 * visible at this timestamp will not ever be projected up to
+                                 * scans past this timestamp, so don't need to be considered.
+                                 * We insert one empty key value per row per timestamp.
+                                 */
+                                Set<Long> timeStamps =
+                                    Sets.newHashSetWithExpectedSize(results.size());
+                                for (Cell kv : results) {
+                                    long kvts = kv.getTimestamp();
+                                    if (!timeStamps.contains(kvts)) {
+                                        Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
+                                            kv.getRowLength());
+                                        put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
+                                            ByteUtil.EMPTY_BYTE_ARRAY);
+                                        mutations.add(put);
+                                    }
+                                }
                             }
-                        } else if (deleteCF != null && deleteCQ != null) {
-                            // No need to search for delete column, since we project only it
-                            // if no empty key value is being set
-                            if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
-                                Delete delete = new Delete(results.get(0).getRowArray(), results.get(0).getRowOffset(), 
-                                  results.get(0).getRowLength());
-                                delete.deleteColumns(deleteCF,  deleteCQ, ts);
-                                mutations.add(delete);
+                            // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
+                            if (!mutations.isEmpty() && batchSize > 0 &&
+                                    mutations.size() % batchSize == 0) {
+                                commitBatch(region, mutations, indexUUID);
+                                mutations.clear();
                             }
-                        }
-                        if (emptyCF != null) {
-                            /*
-                             * If we've specified an emptyCF, then we need to insert an empty
-                             * key value "retroactively" for any key value that is visible at
-                             * the timestamp that the DDL was issued. Key values that are not
-                             * visible at this timestamp will not ever be projected up to
-                             * scans past this timestamp, so don't need to be considered.
-                             * We insert one empty key value per row per timestamp.
-                             */
-                            Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
-                            for (Cell kv : results) {
-                                long kvts = kv.getTimestamp();
-                                if (!timeStamps.contains(kvts)) {
-                                    Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
-                                    put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
-                                    mutations.add(put);
-                                }
+                            // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
+                            if (!indexMutations.isEmpty() && batchSize > 0 &&
+                                    indexMutations.size() % batchSize == 0) {
+                                commitIndexMutations(c, region, indexMutations);
                             }
+                        } catch (ConstraintViolationException e) {
+                            // Log and ignore in count
+                            logger.error(LogUtil.addCustomAnnotations("Failed to create row in " +
+                                region.getRegionNameAsString() + " with values " +
+                                SchemaUtil.toString(values),
+                                ScanUtil.getCustomAnnotations(scan)), e);
+                            continue;
                         }
-                        // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
-                        if (!mutations.isEmpty() && batchSize > 0 && mutations.size() % batchSize == 0) {
-                            commitBatch(region, mutations, indexUUID);
-                            mutations.clear();
-                        }
-                        // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
-                        if (!indexMutations.isEmpty() && batchSize > 0 && indexMutations.size() % batchSize == 0) {
-                            commitIndexMutations(c, region, indexMutations);
-                        }
-
-                    } catch (ConstraintViolationException e) {
-                        // Log and ignore in count
-                        logger.error(LogUtil.addCustomAnnotations("Failed to create row in " + region.getRegionNameAsString() + " with values " + SchemaUtil.toString(values), ScanUtil.getCustomAnnotations(scan)), e);
-                        continue;
+                        aggregators.aggregate(rowAggregators, result);
+                        hasAny = true;
                     }
-                    aggregators.aggregate(rowAggregators, result);
-                    hasAny = true;
-                }
-            } while (hasMore);
+                } while (hasMore);
+            }
         } finally {
             try {
                 if (stats != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d50147f/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index a0bd7c5..b89c807 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -78,14 +78,16 @@ public class PhoenixIndexBuilder extends CoveredColumnsIndexBuilder {
         // Run through the scanner using internal nextRaw method
         region.startRegionOperation();
         try {
-            boolean hasMore;
-            do {
-                List<Cell> results = Lists.newArrayList();
-                // Results are potentially returned even when the return value of s.next is false
-                // since this is an indication of whether or not there are more values after the
-                // ones returned
-                hasMore = scanner.nextRaw(results);
-            } while (hasMore);
+            synchronized (scanner) {
+                boolean hasMore;
+                do {
+                    List<Cell> results = Lists.newArrayList();
+                    // Results are potentially returned even when the return value of scanner.next is
+                    // false since this is an indication of whether or not there are more values
+                    // after the ones returned
+                    hasMore = scanner.nextRaw(results);
+                } while (hasMore);
+            }
         } finally {
             try {
                 scanner.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d50147f/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
index bff0936..88e141a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
@@ -38,23 +38,27 @@ public class RegionScannerResultIterator extends BaseResultIterator {
     
     @Override
     public Tuple next() throws SQLException {
-        try {
-            // TODO: size
-            List<Cell> results = new ArrayList<Cell>();
-            // Results are potentially returned even when the return value of s.next is false
-            // since this is an indication of whether or not there are more values after the
-            // ones returned
-            boolean hasMore = scanner.nextRaw(results);
-            if (!hasMore && results.isEmpty()) {
-                return null;
+        // XXX: No access here to the region instance to enclose this with startRegionOperation /
+        // closeRegionOperation
+        synchronized (scanner) {
+            try {
+                // TODO: size
+                List<Cell> results = new ArrayList<Cell>();
+                // Results are potentially returned even when the return value of scanner.next is false
+                // since this is an indication of whether or not there are more values after the
+                // ones returned
+                boolean hasMore = scanner.nextRaw(results);
+                if (!hasMore && results.isEmpty()) {
+                    return null;
+                }
+                // We instantiate a new tuple because in all cases currently we hang on to it
+                // (i.e. to compute and hold onto the TopN).
+                MultiKeyValueTuple tuple = new MultiKeyValueTuple();
+                tuple.setKeyValues(results);
+                return tuple;
+            } catch (IOException e) {
+                throw ServerUtil.parseServerException(e);
             }
-            // We instantiate a new tuple because in all cases currently we hang on to it (i.e.
-            // to compute and hold onto the TopN).
-            MultiKeyValueTuple tuple = new MultiKeyValueTuple();
-            tuple.setKeyValues(results);
-            return tuple;
-        } catch (IOException e) {
-            throw ServerUtil.parseServerException(e);
         }
     }
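
Both hunks in this commit apply the same pattern: all access to a shared
RegionScanner is funneled through the scanner's monitor so concurrent callers
cannot interleave nextRaw() calls. A minimal standalone sketch of that pattern
(the ScannerDrainer helper is illustrative, not part of the Phoenix code base):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.regionserver.RegionScanner;

    final class ScannerDrainer {
        private ScannerDrainer() {}

        // Drain a scanner to its end while holding its monitor, mirroring
        // the synchronized (scanner) blocks introduced above.
        static void drain(RegionScanner scanner) throws IOException {
            synchronized (scanner) {
                boolean hasMore;
                do {
                    List<Cell> results = new ArrayList<Cell>();
                    // nextRaw may hand back cells even when it reports no
                    // more rows, so results must be consumed each iteration.
                    hasMore = scanner.nextRaw(results);
                } while (hasMore);
            }
        }
    }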
 


[42/50] [abbrv] phoenix git commit: PHOENIX-1115 Provide a SQL command to turn tracing on/off(Rajeshbabu)

Posted by ma...@apache.org.
PHOENIX-1115 Provide a SQL command to turn tracing on/off(Rajeshbabu)
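
From the client's perspective the new statement is issued over plain JDBC, as
the integration test in the diff below does. A minimal sketch, assuming only a
Phoenix client jar on the classpath and a cluster reachable at the
(illustrative) URL jdbc:phoenix:localhost:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class TraceToggleExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // TRACE ON returns a single row carrying the new trace id.
                ResultSet rs = stmt.executeQuery("TRACE ON");
                if (rs.next()) {
                    System.out.println("tracing, trace_id=" + rs.getLong("trace_id"));
                }
                // ... statements run here are sampled into the trace ...
                // TRACE OFF echoes the same trace id back and closes the span.
                rs = stmt.executeQuery("TRACE OFF");
                if (rs.next()) {
                    System.out.println("stopped, trace_id=" + rs.getLong("trace_id"));
                }
            }
        }
    }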


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0440aca5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0440aca5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0440aca5

Branch: refs/heads/calcite
Commit: 0440aca51916d689f3ea94d4ef708d73887f7994
Parents: 93f5605
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Sat Feb 28 05:49:53 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Sat Feb 28 05:49:53 2015 +0530

----------------------------------------------------------------------
 .../phoenix/trace/PhoenixTracingEndToEndIT.java |  32 ++-
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |  10 +
 .../apache/phoenix/compile/TraceQueryPlan.java  | 220 +++++++++++++++++++
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  17 ++
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  24 +-
 .../apache/phoenix/parse/ParseNodeFactory.java  |   4 +
 .../apache/phoenix/parse/TraceStatement.java    |  43 ++++
 .../org/apache/phoenix/trace/util/Tracing.java  |  10 +
 8 files changed, 358 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0440aca5/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
index 201de38..53d22c5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
@@ -19,12 +19,15 @@ package org.apache.phoenix.trace;
 
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
+import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Collection;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -33,6 +36,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.metrics.Metrics;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
@@ -391,7 +395,33 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         assertAnnotationPresent(TENANT_ID_ATTRIB, tenantId, conn);
         // CurrentSCN is also added as an annotation. Not tested here because it screws up test setup.
     }
-    
+
+    @Test
+    public void testTraceOnOrOff() throws Exception {
+        Connection conn1 = DriverManager.getConnection(getUrl());
+        try {
+            Statement statement = conn1.createStatement();
+            ResultSet rs = statement.executeQuery("TRACE ON");
+            assertTrue(rs.next());
+            long traceId = ((PhoenixConnection) conn1).getTraceScope().getSpan()
+                    .getTraceId();
+            assertEquals(traceId, rs.getLong(1));
+            assertEquals(traceId, rs.getLong("trace_id"));
+            assertFalse(rs.next());
+
+            rs = statement.executeQuery("TRACE OFF");
+            assertTrue(rs.next());
+            assertEquals(traceId, rs.getLong(1));
+            assertEquals(traceId, rs.getLong("trace_id"));
+            assertFalse(rs.next());
+
+            rs = statement.executeQuery("TRACE OFF");
+            assertFalse(rs.next());
+        } finally {
+            conn1.close();
+        }
+    }
+
     private void assertAnnotationPresent(final String annotationKey, final String annotationValue, Connection conn) throws Exception {
         boolean tracingComplete = checkStoredTraces(conn, new TraceChecker(){
             @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0440aca5/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index cda93fe..9e843a0 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -33,6 +33,7 @@ tokens
     AS='as';
     OUTER='outer';
     ON='on';
+    OFF='off';
     IN='in';
     GROUP='group';
     HAVING='having';
@@ -107,6 +108,7 @@ tokens
     UPDATE='update';
     STATISTICS='statistics';    
     COLUMNS='columns';
+    TRACE='trace';
 }
 
 
@@ -160,6 +162,7 @@ import org.apache.phoenix.schema.types.PUnsignedTime;
 import org.apache.phoenix.schema.types.PUnsignedTimestamp;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.parse.LikeParseNode.LikeType;
+import org.apache.phoenix.trace.util.Tracing;
 }
 
 @lexer::header {
@@ -365,6 +368,7 @@ non_select_node returns [BindableStatement ret]
     |   s=drop_index_node
     |   s=alter_index_node
     |   s=alter_table_node
+    |   s=trace_node
     |	s=create_sequence_node
     |	s=drop_sequence_node
     |   s=update_statistics_node
@@ -498,6 +502,12 @@ alter_index_node returns [AlterIndexStatement ret]
       {ret = factory.alterIndex(factory.namedTable(null, TableName.create(t.getSchemaName(), i.getName())), t.getTableName(), ex!=null, PIndexState.valueOf(SchemaUtil.normalizeIdentifier(s.getText()))); }
     ;
 
+// Parse a trace statement.
+trace_node returns [TraceStatement ret]
+    :   TRACE (flag = ON | flag = OFF)
+       {ret = factory.trace(Tracing.isTraceOn(flag.getText()));}
+    ;
+
 // Parse an alter table statement.
 alter_table_node returns [AlterTableStatement ret]
     :   ALTER (TABLE | v=VIEW) t=from_table_name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0440aca5/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
new file mode 100644
index 0000000..9eb5877
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.ParameterMetaData;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.expression.Determinism;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.RowKeyColumnExpression;
+import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixParameterMetaData;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.metrics.MetricInfo;
+import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.parse.LiteralParseNode;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.TraceStatement;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PColumnImpl;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.RowKeyValueAccessor;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.tuple.ResultTuple;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.trace.util.Tracing;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.SizedUtil;
+import org.cloudera.htrace.Sampler;
+import org.cloudera.htrace.TraceScope;
+
+public class TraceQueryPlan implements QueryPlan {
+
+    private TraceStatement traceStatement = null;
+    private PhoenixStatement stmt = null;
+    private StatementContext context = null;
+    private boolean first = true;
+
+    private static final RowProjector TRACE_PROJECTOR;
+    static {
+        List<ExpressionProjector> projectedColumns = new ArrayList<ExpressionProjector>();
+        PColumn column =
+                new PColumnImpl(PNameFactory.newName(MetricInfo.TRACE.columnName), null,
+                        PLong.INSTANCE, null, null, false, 0, SortOrder.getDefault(), 0, null,
+                        false, null);
+        List<PColumn> columns = new ArrayList<PColumn>();
+        columns.add(column);
+        Expression expression =
+                new RowKeyColumnExpression(column, new RowKeyValueAccessor(columns, 0));
+        projectedColumns.add(new ExpressionProjector(MetricInfo.TRACE.columnName, "", expression,
+                true));
+        int estimatedByteSize = SizedUtil.KEY_VALUE_SIZE + PLong.INSTANCE.getByteSize();
+        TRACE_PROJECTOR = new RowProjector(projectedColumns, estimatedByteSize, false);
+    }
+
+    public TraceQueryPlan(TraceStatement traceStatement, PhoenixStatement stmt) {
+        this.traceStatement = traceStatement;
+        this.stmt = stmt;
+        this.context = new StatementContext(stmt);
+    }
+
+    @Override
+    public StatementContext getContext() {
+        return this.context;
+    }
+
+    @Override
+    public ParameterMetaData getParameterMetaData() {
+        return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
+    }
+
+    @Override
+    public ExplainPlan getExplainPlan() throws SQLException {
+        return ExplainPlan.EMPTY_PLAN;
+    }
+
+    @Override
+    public ResultIterator iterator() throws SQLException {
+        final PhoenixConnection conn = stmt.getConnection();
+        if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
+            return ResultIterator.EMPTY_ITERATOR;
+        }
+        return new ResultIterator() {
+
+            @Override
+            public void close() throws SQLException {
+            }
+
+            @Override
+            public Tuple next() throws SQLException {
+                if (!first) return null;
+                TraceScope traceScope = conn.getTraceScope();
+                if (traceStatement.isTraceOn()) {
+                    if (!conn.getSampler().equals(Sampler.ALWAYS)) {
+                        conn.setSampler(Sampler.ALWAYS);
+                    }
+                    if (traceScope == null) {
+                        traceScope = Tracing.startNewSpan(conn, "Enabling trace");
+                        conn.setTraceScope(traceScope);
+                    }
+                } else {
+                    if (traceScope != null) {
+                        traceScope.close();
+                        conn.setTraceScope(null);
+                    }
+                    conn.setSampler(Sampler.NEVER);
+                }
+                if (traceScope == null) return null;
+                first = false;
+                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+                ParseNodeFactory factory = new ParseNodeFactory();
+                LiteralParseNode literal =
+                        factory.literal(traceScope.getSpan().getTraceId());
+                LiteralExpression expression =
+                        LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE,
+                            Determinism.ALWAYS);
+                expression.evaluate(null, ptr);
+                byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
+                Cell cell =
+                        CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY,
+                            HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(),
+                            Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
+                List<Cell> cells = new ArrayList<Cell>(1);
+                cells.add(cell);
+                return new ResultTuple(Result.create(cells));
+            }
+
+            @Override
+            public void explain(List<String> planSteps) {
+            }
+        };
+    }
+
+    @Override
+    public long getEstimatedSize() {
+        return PLong.INSTANCE.getByteSize();
+    }
+
+    @Override
+    public TableRef getTableRef() {
+        return null;
+    }
+
+    @Override
+    public RowProjector getProjector() {
+        return TRACE_PROJECTOR;
+    }
+
+    @Override
+    public Integer getLimit() {
+        return null;
+    }
+
+    @Override
+    public OrderBy getOrderBy() {
+        return OrderBy.EMPTY_ORDER_BY;
+    }
+
+    @Override
+    public GroupBy getGroupBy() {
+        return GroupBy.EMPTY_GROUP_BY;
+    }
+
+    @Override
+    public List<KeyRange> getSplits() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public List<List<Scan>> getScans() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public FilterableStatement getStatement() {
+        return null;
+    }
+
+    @Override
+    public boolean isDegenerate() {
+        return false;
+    }
+
+    @Override
+    public boolean isRowKeyOrdered() {
+        return false;
+    }
+}
\ No newline at end of file
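
The plan above materializes exactly one tuple whose row key is the
PLong-encoded trace id. A sketch of the same encode/decode round trip using
Phoenix's PDataType API (the wrapper class and values are illustrative):

    import org.apache.phoenix.schema.types.PLong;

    public class TraceIdRoundTrip {
        public static void main(String[] args) {
            long traceId = 42L;
            // The same encoding iterator() stores in the row key.
            byte[] rowKey = PLong.INSTANCE.toBytes(traceId);
            long decoded = (Long) PLong.INSTANCE.toObject(rowKey);
            System.out.println(decoded); // 42
        }
    }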

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0440aca5/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index de9e323..630c8f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -93,6 +93,7 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.cloudera.htrace.Sampler;
+import org.cloudera.htrace.TraceScope;
 
 import com.google.common.base.Objects;
 import com.google.common.base.Strings;
@@ -130,6 +131,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     private final String datePattern;
     private final String timePattern;
     private final String timestampPattern;
+    private TraceScope traceScope = null;
     
     private boolean isClosed = false;
     private Sampler<?> sampler;
@@ -257,6 +259,10 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         return this.sampler;
     }
     
+    public void setSampler(Sampler<?> sampler) throws SQLException {
+        this.sampler = sampler;
+    }
+
     public Map<String, String> getCustomTracingAnnotations() {
         return customTracingAnnotations;
     }
@@ -408,6 +414,9 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         }
         try {
             try {
+                if (traceScope != null) {
+                    traceScope.close();
+                }
                 closeStatements();
             } finally {
                 services.removeConnection(this);
@@ -776,4 +785,12 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     public KeyValueBuilder getKeyValueBuilder() {
         return this.services.getKeyValueBuilder();
     }
+
+    public TraceScope getTraceScope() {
+        return traceScope;
+    }
+
+    public void setTraceScope(TraceScope traceScope) {
+        this.traceScope = traceScope;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0440aca5/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 4ca5bb5..4a23ab7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -55,6 +55,7 @@ import org.apache.phoenix.compile.StatementNormalizer;
 import org.apache.phoenix.compile.StatementPlan;
 import org.apache.phoenix.compile.SubqueryRewriter;
 import org.apache.phoenix.compile.SubselectRewriter;
+import org.apache.phoenix.compile.TraceQueryPlan;
 import org.apache.phoenix.compile.UpsertCompiler;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.BatchUpdateExecution;
@@ -93,6 +94,7 @@ import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.parse.TableNode;
+import org.apache.phoenix.parse.TraceStatement;
 import org.apache.phoenix.parse.UpdateStatisticsStatement;
 import org.apache.phoenix.parse.UpsertStatement;
 import org.apache.phoenix.query.KeyRange;
@@ -124,6 +126,8 @@ import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ServerUtil;
+import org.cloudera.htrace.Sampler;
+import org.cloudera.htrace.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -690,6 +694,19 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         }
     }
     
+    private static class ExecutableTraceStatement extends TraceStatement implements CompilableStatement {
+
+        public ExecutableTraceStatement(boolean isTraceOn) {
+            super(isTraceOn);
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
+            return new TraceQueryPlan(this, stmt);
+        }
+    }
+
     private static class ExecutableUpdateStatisticsStatement extends UpdateStatisticsStatement implements
             CompilableStatement {
         public ExecutableUpdateStatisticsStatement(NamedTableNode table, StatisticsCollectionScope scope) {
@@ -882,7 +899,12 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         public AlterIndexStatement alterIndex(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, PIndexState state) {
             return new ExecutableAlterIndexStatement(indexTableNode, dataTableName, ifExists, state);
         }
-        
+
+        @Override
+        public TraceStatement trace(boolean isTraceOn) {
+            return new ExecutableTraceStatement(isTraceOn);
+        }
+
         @Override
         public ExplainStatement explain(BindableStatement statement) {
             return new ExecutableExplainStatement(statement);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0440aca5/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index ddfaa03..4e8f792 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -325,6 +325,10 @@ public class ParseNodeFactory {
         return new AlterIndexStatement(indexTableNode, dataTableName, ifExists, state);
     }
 
+    public TraceStatement trace(boolean isTraceOn) {
+        return new TraceStatement(isTraceOn);
+    }
+
     public TableName table(String schemaName, String tableName) {
         return TableName.createNormalized(schemaName,tableName);
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0440aca5/phoenix-core/src/main/java/org/apache/phoenix/parse/TraceStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/TraceStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/TraceStatement.java
new file mode 100644
index 0000000..7460a85
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/TraceStatement.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
+
+public class TraceStatement implements BindableStatement {
+
+    private boolean traceOn = false;
+
+    public TraceStatement(boolean isOn) {
+        this.traceOn = isOn;
+    }
+
+    @Override
+    public int getBindCount() {
+        return 0;
+    }
+
+    @Override
+    public Operation getOperation() {
+        return Operation.QUERY;
+    }
+
+    public boolean isTraceOn() {
+        return traceOn;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0440aca5/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index a46d4e8..7cd55e8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -263,4 +263,14 @@ public class Tracing {
         }
         initialized = true;
     }
+
+    public static boolean isTraceOn(String traceOption) {
+        Preconditions.checkArgument(traceOption != null);
+        if (traceOption.equalsIgnoreCase("ON")) return true;
+        if (traceOption.equalsIgnoreCase("OFF")) return false;
+        // Any other token indicates bad input from the parser; fail fast.
+        throw new IllegalArgumentException("Unknown tracing option: "
+                + traceOption);
+    }
+
 }
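
For completeness, the ON/OFF token text flows from the trace_node grammar rule
straight into this helper. A small illustrative check (the wrapper class is
not part of the code base):

    import org.apache.phoenix.trace.util.Tracing;

    public class TraceOptionCheck {
        public static void main(String[] args) {
            // Token text comes from: TRACE (flag = ON | flag = OFF)
            System.out.println(Tracing.isTraceOn("ON"));  // true
            System.out.println(Tracing.isTraceOn("off")); // false; match is case-insensitive
            // Any other value, e.g. Tracing.isTraceOn("maybe"), throws
            // IllegalArgumentException.
        }
    }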