Posted to commits@hive.apache.org by gu...@apache.org on 2013/11/12 19:23:14 UTC

svn commit: r1541190 [6/15] - in /hive/branches/tez: ./ ant/src/org/apache/hadoop/hive/ant/ beeline/ beeline/src/java/org/apache/hive/beeline/ cli/ cli/src/java/org/apache/hadoop/hive/cli/ common/ common/src/java/org/apache/hadoop/hive/common/ common/s...

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUpper.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUpper.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUpper.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUpper.java Tue Nov 12 18:23:05 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter.StringConverter;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 
@@ -65,14 +66,23 @@ public class GenericUDFUpper extends Gen
     stringConverter = new PrimitiveObjectInspectorConverter.StringConverter(argumentOI);
     PrimitiveCategory inputType = argumentOI.getPrimitiveCategory();
     ObjectInspector outputOI = null;
+    BaseCharTypeInfo typeInfo;
     switch (inputType) {
+      case CHAR:
+        // return type should have same length as the input.
+        returnType = inputType;
+        typeInfo = TypeInfoFactory.getCharTypeInfo(
+            GenericUDFUtils.StringHelper.getFixedStringSizeForType(argumentOI));
+        outputOI = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
+            typeInfo);
+        break;
       case VARCHAR:
         // return type should have same length as the input.
         returnType = inputType;
-        VarcharTypeInfo varcharTypeInfo = TypeInfoFactory.getVarcharTypeInfo(
+        typeInfo = TypeInfoFactory.getVarcharTypeInfo(
             GenericUDFUtils.StringHelper.getFixedStringSizeForType(argumentOI));
         outputOI = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
-            varcharTypeInfo);
+            typeInfo);
         break;
       default:
         returnType = PrimitiveCategory.STRING;
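
For context, the CHAR branch added above mirrors the existing VARCHAR handling: the output ObjectInspector carries the same fixed length as the input, so upper() over a char(n)/varchar(n) column stays char(n)/varchar(n) instead of widening to string. A minimal sketch of the pattern, using only names visible in this hunk:

    // Inside initialize(), once inputType is known to be CHAR or VARCHAR:
    int len = GenericUDFUtils.StringHelper.getFixedStringSizeForType(argumentOI);
    BaseCharTypeInfo typeInfo = (inputType == PrimitiveCategory.CHAR)
        ? TypeInfoFactory.getCharTypeInfo(len)       // char(n) stays char(n)
        : TypeInfoFactory.getVarcharTypeInfo(len);   // varchar(n) stays varchar(n)
    ObjectInspector outputOI =
        PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(typeInfo);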

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java Tue Nov 12 18:23:05 2013
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.Fu
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
+import org.apache.hadoop.hive.serde2.io.HiveCharWritable;
 import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
@@ -41,9 +42,9 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.VoidObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.Text;
 
 /**
@@ -367,6 +368,9 @@ public final class GenericUDFUtils {
         case STRING:
           returnValue = new Text();
           break;
+        case CHAR:
+          returnValue = new HiveCharWritable();
+          break;
         case VARCHAR:
           returnValue = new HiveVarcharWritable();
           break;
@@ -383,6 +387,9 @@ public final class GenericUDFUtils {
         case STRING:
           ((Text)returnValue).set(val);
           return returnValue;
+        case CHAR:
+          ((HiveCharWritable) returnValue).set(val);
+          return returnValue;
         case VARCHAR:
           ((HiveVarcharWritable)returnValue).set(val);
           return returnValue;
@@ -402,8 +409,9 @@ public final class GenericUDFUtils {
         throws UDFArgumentException {
       // TODO: we can support date, int, .. any types which would have a fixed length value
       switch (poi.getPrimitiveCategory()) {
+        case CHAR:
         case VARCHAR:
-          VarcharTypeInfo typeInfo = (VarcharTypeInfo) poi.getTypeInfo();
+          BaseCharTypeInfo typeInfo = (BaseCharTypeInfo) poi.getTypeInfo();
           return typeInfo.getLength();
         default:
           throw new UDFArgumentException("No fixed size for type " + poi.getTypeName());
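
These hunks teach GenericUDFUtils.StringHelper about CHAR alongside VARCHAR: it allocates a HiveCharWritable as the reusable return object, sets it from the computed String, and reports a fixed size for char(n) as well as varchar(n). A hedged sketch of how a string UDF drives the helper; the constructor and setReturnValue shapes are inferred from the hunks above, not spelled out in this diff:

    // initialize(): build the helper once for the resolved return category.
    GenericUDFUtils.StringHelper returnHelper =
        new GenericUDFUtils.StringHelper(returnType);  // assumed constructor shape

    // evaluate(): compute the String result; the helper wraps it in Text,
    // HiveCharWritable, or HiveVarcharWritable depending on returnType.
    return returnHelper.setReturnValue(val.toUpperCase());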

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java Tue Nov 12 18:23:05 2013
@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import java.util.List;
+
 import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 
 /**
@@ -43,6 +46,16 @@ public abstract class GenericUDTF {
   public void configure(MapredContext mapredContext) {
   }
 
+  public StructObjectInspector initialize(StructObjectInspector argOIs)
+      throws UDFArgumentException {
+    List<? extends StructField> inputFields = argOIs.getAllStructFieldRefs();
+    ObjectInspector[] udtfInputOIs = new ObjectInspector[inputFields.size()];
+    for (int i = 0; i < inputFields.size(); i++) {
+      udtfInputOIs[i] = inputFields.get(i).getFieldObjectInspector();
+    }
+    return initialize(udtfInputOIs);
+  }
+
   /**
    * Initialize this GenericUDTF. This will be called only once per instance.
    *
@@ -53,8 +66,11 @@ public abstract class GenericUDTF {
    *         field names are unimportant as they will be overridden by user
    *         supplied column aliases.
    */
-  public abstract StructObjectInspector initialize(ObjectInspector[] argOIs)
-      throws UDFArgumentException;
+  @Deprecated
+  public StructObjectInspector initialize(ObjectInspector[] argOIs)
+      throws UDFArgumentException {
+    throw new IllegalStateException("Should not be called directly");
+  }
 
   /**
    * Give a set of arguments for the UDTF to process.
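
With this change a UDTF overrides the struct-based initialize, which also exposes input field names, while the old ObjectInspector[] variant is deprecated and throws unless overridden. A minimal pass-through UDTF sketch under those assumptions; the class name, output schema, and process logic are illustrative only:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
    import org.apache.hadoop.hive.serde2.objectinspector.StructField;
    import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;

    public class IdentityUDTF extends GenericUDTF {
      private transient ObjectInspector inputOI;

      @Override
      public StructObjectInspector initialize(StructObjectInspector argOIs)
          throws UDFArgumentException {
        List<? extends StructField> fields = argOIs.getAllStructFieldRefs();
        if (fields.size() != 1) {
          throw new UDFArgumentException("IdentityUDTF takes exactly one argument");
        }
        inputOI = fields.get(0).getFieldObjectInspector();
        // Emit a single column; callers override the name with column aliases.
        return ObjectInspectorFactory.getStandardStructObjectInspector(
            Arrays.asList("col"), Arrays.<ObjectInspector>asList(inputOI));
      }

      @Override
      public void process(Object[] args) throws HiveException {
        forward(new Object[] { args[0] });  // one output row per input row
      }

      @Override
      public void close() throws HiveException {
        // nothing buffered, nothing to flush
      }
    }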

Modified: hive/branches/tez/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto (original)
+++ hive/branches/tez/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto Tue Nov 12 18:23:05 2013
@@ -152,6 +152,14 @@ message UserMetadataItem {
   required bytes value = 2;
 }
 
+message StripeStatistics {
+  repeated ColumnStatistics colStats = 1;
+}
+
+message Metadata {
+  repeated StripeStatistics stripeStats = 1;
+}
+
 message Footer {
   optional uint64 headerLength = 1;
   optional uint64 contentLength = 2;
@@ -176,6 +184,7 @@ message PostScript {
   optional CompressionKind compression = 2;
   optional uint64 compressionBlockSize = 3;
   repeated uint32 version = 4 [packed = true];
+  optional uint64 metadataLength = 5;
   // Leave this last in the record
   optional string magic = 8000;
 }
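
The new Metadata section holds per-stripe column statistics and sits between the stripes and the Footer; metadataLength in the PostScript lets a reader locate it (and lets older readers skip it). A rough sketch of finding the section, assuming the usual ORC tail layout (metadata, footer, postscript, one-byte postscript length) and ignoring tail-stream compression; fileLen, psLen, and read(offset, len) are placeholders for the file length, the file's final byte, and a positional read:

    OrcProto.PostScript ps =
        OrcProto.PostScript.parseFrom(read(fileLen - 1 - psLen, psLen));
    long footerStart = fileLen - 1 - psLen - ps.getFooterLength();
    long metadataStart = footerStart - ps.getMetadataLength();
    OrcProto.Metadata metadata = OrcProto.Metadata.parseFrom(
        read(metadataStart, (int) ps.getMetadataLength()));
    for (OrcProto.StripeStatistics ss : metadata.getStripeStatsList()) {
      // ss.getColStatsList(): one ColumnStatistics per column in this stripe
    }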

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java Tue Nov 12 18:23:05 2013
@@ -62,6 +62,8 @@ public class TestFunctionRegistry extend
   TypeInfo varchar5;
   TypeInfo varchar10;
   TypeInfo maxVarchar;
+  TypeInfo char5;
+  TypeInfo char10;
 
   @Override
   protected void setUp() {
@@ -69,6 +71,8 @@ public class TestFunctionRegistry extend
     maxVarchar = TypeInfoFactory.getPrimitiveTypeInfo(maxVarcharTypeName);
     varchar10 = TypeInfoFactory.getPrimitiveTypeInfo("varchar(10)");
     varchar5 = TypeInfoFactory.getPrimitiveTypeInfo("varchar(5)");
+    char10 = TypeInfoFactory.getPrimitiveTypeInfo("char(10)");
+    char5 = TypeInfoFactory.getPrimitiveTypeInfo("char(5)");
   }
 
   private void implicit(TypeInfo a, TypeInfo b, boolean convertible) {
@@ -95,6 +99,13 @@ public class TestFunctionRegistry extend
     implicit(TypeInfoFactory.stringTypeInfo, varchar20, true);
     implicit(varchar20, varchar10, true);
 
+    implicit(char10, TypeInfoFactory.stringTypeInfo, true);
+    implicit(TypeInfoFactory.stringTypeInfo, char10, true);
+    implicit(char5, char10, true);
+    implicit(char5, varchar10, true);
+    implicit(varchar5, char10, true);
+
+    implicit(TypeInfoFactory.intTypeInfo, char10, true);
     implicit(TypeInfoFactory.intTypeInfo, varchar10, true);
     implicit(TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo, true);
   }
@@ -137,6 +148,8 @@ public class TestFunctionRegistry extend
 
     // String type affinity
     typeAffinity("typeaffinity1", TypeInfoFactory.stringTypeInfo, 1, Text.class);
+    typeAffinity("typeaffinity1", char5, 1, Text.class);
+    typeAffinity("typeaffinity1", varchar5, 1, Text.class);
 
     // Type affinity does not help when multiple methods have the same type affinity.
     typeAffinity("typeaffinity2", TypeInfoFactory.shortTypeInfo, 2, null);
@@ -191,8 +204,9 @@ public class TestFunctionRegistry extend
     verify(TestUDF.class, "one", TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo,
            IntWritable.class, IntWritable.class, false);
 
-    // Passing varchar arguments should prefer the version of evaluate() with Text args.
+    // Passing char/varchar arguments should prefer the version of evaluate() with Text args.
     verify(TestUDF.class, "same", varchar5, varchar10, Text.class, Text.class, false);
+    verify(TestUDF.class, "same", char5, char10, Text.class, Text.class, false);
 
     verify(TestUDF.class, "mismatch", TypeInfoFactory.voidTypeInfo, TypeInfoFactory.intTypeInfo,
            null, null, true);
@@ -214,6 +228,10 @@ public class TestFunctionRegistry extend
 
     common(TypeInfoFactory.stringTypeInfo, varchar10, TypeInfoFactory.stringTypeInfo);
     common(varchar10, TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo);
+    common(TypeInfoFactory.stringTypeInfo, char10, TypeInfoFactory.stringTypeInfo);
+    common(char10, TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo);
+    // common class between char/varchar is string?
+    common(char5, varchar10, TypeInfoFactory.stringTypeInfo);
   }
 
   private void comparison(TypeInfo a, TypeInfo b, TypeInfo result) {
@@ -238,6 +256,11 @@ public class TestFunctionRegistry extend
     comparison(TypeInfoFactory.stringTypeInfo, varchar10, TypeInfoFactory.stringTypeInfo);
     comparison(varchar10, TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo);
     comparison(varchar5, varchar10, varchar10);
+    comparison(TypeInfoFactory.stringTypeInfo, char10, TypeInfoFactory.stringTypeInfo);
+    comparison(char10, TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo);
+    comparison(char5, char10, char10);
+    // common comparison class for char/varchar is string?
+    comparison(char10, varchar5, TypeInfoFactory.stringTypeInfo);
   }
 
   /**
@@ -304,6 +327,14 @@ public class TestFunctionRegistry extend
     unionAll(varchar10, varchar5, varchar10);
     unionAll(varchar10, TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo);
     unionAll(TypeInfoFactory.stringTypeInfo, varchar10, TypeInfoFactory.stringTypeInfo);
+
+    unionAll(char5, char10, char10);
+    unionAll(char10, char5, char10);
+    unionAll(char10, TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo);
+    unionAll(TypeInfoFactory.stringTypeInfo, char10, TypeInfoFactory.stringTypeInfo);
+
+    // common class for char/varchar is string?
+    comparison(char10, varchar5, TypeInfoFactory.stringTypeInfo);
   }
 
   public void testGetTypeInfoForPrimitiveCategory() {
@@ -314,6 +345,14 @@ public class TestFunctionRegistry extend
     assertEquals(varchar10, FunctionRegistry.getTypeInfoForPrimitiveCategory(
         (PrimitiveTypeInfo) varchar10, (PrimitiveTypeInfo) varchar5, PrimitiveCategory.VARCHAR));
 
+    assertEquals(char10, FunctionRegistry.getTypeInfoForPrimitiveCategory(
+        (PrimitiveTypeInfo) char5, (PrimitiveTypeInfo) char10, PrimitiveCategory.CHAR));
+    assertEquals(char10, FunctionRegistry.getTypeInfoForPrimitiveCategory(
+        (PrimitiveTypeInfo) char10, (PrimitiveTypeInfo) char5, PrimitiveCategory.CHAR));
+
+    assertEquals(varchar10, FunctionRegistry.getTypeInfoForPrimitiveCategory(
+        (PrimitiveTypeInfo) varchar5, (PrimitiveTypeInfo) char10, PrimitiveCategory.VARCHAR));
+
     // non-qualified types should simply return the TypeInfo associated with that type
     assertEquals(TypeInfoFactory.stringTypeInfo, FunctionRegistry.getTypeInfoForPrimitiveCategory(
         (PrimitiveTypeInfo) varchar10, (PrimitiveTypeInfo) TypeInfoFactory.stringTypeInfo,
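
The new assertions pin down the coercion rules for qualified types: within a category the longer length wins (char(5) with char(10) gives char(10)), while mixing char with varchar, or either with string, falls back to string. A hedged sketch of the kind of call the test helpers wrap; getCommonClassForComparison is an existing FunctionRegistry method, but the helper mapping is an assumption:

    TypeInfo char5 = TypeInfoFactory.getPrimitiveTypeInfo("char(5)");
    TypeInfo varchar10 = TypeInfoFactory.getPrimitiveTypeInfo("varchar(10)");
    // No common qualified type across categories: comparison promotes to string.
    TypeInfo common = FunctionRegistry.getCommonClassForComparison(char5, varchar10);
    // expected: TypeInfoFactory.stringTypeInfo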

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java Tue Nov 12 18:23:05 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.exec.v
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -49,6 +50,9 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorUDFUnixTimeStampLong;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorUDFYearLong;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.FilterStringColumnInList;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.FilterLongColumnInList;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.FilterDoubleColumnInList;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.DoubleColUnaryMinus;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterDoubleColLessDoubleScalar;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterDoubleColumnBetween;
@@ -92,6 +96,7 @@ import org.apache.hadoop.hive.ql.udf.UDF
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFLower;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
@@ -938,5 +943,56 @@ public class TestVectorizationContext {
     children1.set(0, new ExprNodeConstantDesc(new Boolean(true)));
     ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER);
     assertTrue(ve instanceof FilterDoubleColumnNotBetween);
+
+    // timestamp BETWEEN
+    children1.set(0, new ExprNodeConstantDesc(new Boolean(false)));
+    children1.set(1, new ExprNodeColumnDesc(Timestamp.class, "col1", "table", false));
+    children1.set(2, new ExprNodeConstantDesc("2013-11-05 00:00:00.000"));
+    children1.set(3, new ExprNodeConstantDesc("2013-11-06 00:00:00.000"));
+    ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER);
+    assertTrue(ve instanceof FilterLongColumnBetween);
+
+    // timestamp NOT BETWEEN
+    children1.set(0, new ExprNodeConstantDesc(new Boolean(true)));
+    ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER);
+    assertTrue(ve instanceof FilterLongColumnNotBetween);
+  }
+
+  @Test
+  public void testInFilters() throws HiveException {
+    ExprNodeColumnDesc col1Expr = new  ExprNodeColumnDesc(String.class, "col1", "table", false);
+    ExprNodeConstantDesc constDesc = new ExprNodeConstantDesc("Alpha");
+    ExprNodeConstantDesc constDesc2 = new ExprNodeConstantDesc("Bravo");
+
+    // string IN
+    GenericUDFIn udf = new GenericUDFIn();
+    ExprNodeGenericFuncDesc exprDesc = new ExprNodeGenericFuncDesc();
+    exprDesc.setGenericUDF(udf);
+    List<ExprNodeDesc> children1 = new ArrayList<ExprNodeDesc>();
+    children1.add(col1Expr);
+    children1.add(constDesc);
+    children1.add(constDesc2);
+    exprDesc.setChildren(children1);
+
+    Map<String, Integer> columnMap = new HashMap<String, Integer>();
+    columnMap.put("col1", 1);
+    columnMap.put("col2", 2);
+    VectorizationContext vc = new VectorizationContext(columnMap, 2);
+    VectorExpression ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER);
+    assertTrue(ve instanceof FilterStringColumnInList);
+
+    // long IN
+    children1.set(0, new ExprNodeColumnDesc(Long.class, "col1", "table", false));
+    children1.set(1, new ExprNodeConstantDesc(10));
+    children1.set(2, new ExprNodeConstantDesc(20));
+    ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER);
+    assertTrue(ve instanceof FilterLongColumnInList);
+
+    // double IN
+    children1.set(0, new ExprNodeColumnDesc(Double.class, "col1", "table", false));
+    children1.set(1, new ExprNodeConstantDesc(10d));
+    children1.set(2, new ExprNodeConstantDesc(20d));
+    ve = vc.getVectorExpression(exprDesc, VectorExpressionDescriptor.Mode.FILTER);
+    assertTrue(ve instanceof FilterDoubleColumnInList);
   }
 }

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java Tue Nov 12 18:23:05 2013
@@ -22,9 +22,12 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.sql.Timestamp;
+
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.TimestampUtils;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterDoubleColumnBetween;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterDoubleColumnNotBetween;
@@ -543,4 +546,242 @@ public class TestVectorFilterExpressions
     assertTrue(vrb.selectedInUse);
     assertEquals(0, vrb.selected[0]);
   }
+
+  @Test
+  public void testFilterTimestampBetween() {
+    int seed = 17;
+    VectorizedRowBatch vrb = VectorizedRowGroupGenUtil.getVectorizedRowBatch(
+        5, 2, seed);
+    LongColumnVector lcv0 = (LongColumnVector) vrb.cols[0];
+    long startTS = 0; // the epoch
+    long endTS = TimestampUtils.getTimeNanoSec(
+        Timestamp.valueOf("2013-11-05 00:00:00.000000000"));
+
+    Timestamp ts0 = Timestamp.valueOf("1963-11-06 00:00:00.000");
+    lcv0.vector[0] = TimestampUtils.getTimeNanoSec(ts0);
+    Timestamp ts1 = Timestamp.valueOf("1983-11-06 00:00:00.000");
+    lcv0.vector[1] = TimestampUtils.getTimeNanoSec(ts1);
+    Timestamp ts2 = Timestamp.valueOf("2099-11-06 00:00:00.000");
+    lcv0.vector[2] = TimestampUtils.getTimeNanoSec(ts2);
+    vrb.size = 3;
+
+    VectorExpression expr1 = new FilterLongColumnBetween(0, startTS, endTS);
+    expr1.evaluate(vrb);
+    assertEquals(1, vrb.size);
+    assertEquals(true, vrb.selectedInUse);
+    assertEquals(1, vrb.selected[0]);
+  }
+
+  @Test
+  public void testFilterTimestampNotBetween() {
+    int seed = 17;
+    VectorizedRowBatch vrb = VectorizedRowGroupGenUtil.getVectorizedRowBatch(
+        5, 2, seed);
+    LongColumnVector lcv0 = (LongColumnVector) vrb.cols[0];
+    long startTS = TimestampUtils.getTimeNanoSec(
+        Timestamp.valueOf("2013-11-05 00:00:00.000000000"));
+    long endTS = TimestampUtils.getTimeNanoSec(
+        Timestamp.valueOf("2013-11-05 00:00:00.000000010"));
+
+    Timestamp ts0 = Timestamp.valueOf("2013-11-04 00:00:00.000000000");
+    lcv0.vector[0] = TimestampUtils.getTimeNanoSec(ts0);
+    Timestamp ts1 = Timestamp.valueOf("2013-11-05 00:00:00.000000002");
+    lcv0.vector[1] = TimestampUtils.getTimeNanoSec(ts1);
+    Timestamp ts2 = Timestamp.valueOf("2099-11-06 00:00:00.000");
+    lcv0.vector[2] = TimestampUtils.getTimeNanoSec(ts2);
+    vrb.size = 3;
+
+    VectorExpression expr1 = new FilterLongColumnNotBetween(0, startTS, endTS);
+    expr1.evaluate(vrb);
+    assertEquals(2, vrb.size);
+    assertEquals(true, vrb.selectedInUse);
+    assertEquals(0, vrb.selected[0]);
+    assertEquals(2, vrb.selected[1]);
+
+  }
+
+  /**
+   * Test the IN filter VectorExpression classes.
+   */
+
+  @Test
+  public void testFilterLongIn() {
+    int seed = 17;
+    VectorizedRowBatch vrb = VectorizedRowGroupGenUtil.getVectorizedRowBatch(
+        5, 2, seed);
+    LongColumnVector lcv0 = (LongColumnVector) vrb.cols[0];
+    long[] inList = {5, 20};
+    FilterLongColumnInList f = new FilterLongColumnInList(0);
+    f.setInListValues(inList);
+    VectorExpression expr1 = f;
+
+    // Basic case
+    lcv0.vector[0] = 5;
+    lcv0.vector[1] = 20;
+    lcv0.vector[2] = 17;
+    lcv0.vector[3] = 15;
+    lcv0.vector[4] = 10;
+
+    expr1.evaluate(vrb);
+
+    assertEquals(2, vrb.size);
+    assertTrue(vrb.selectedInUse);
+    assertEquals(0, vrb.selected[0]);
+    assertEquals(1, vrb.selected[1]);
+
+    // With nulls
+    VectorizedRowBatch vrb1 = VectorizedRowGroupGenUtil.getVectorizedRowBatch(
+        5, 2, seed);
+
+    lcv0 = (LongColumnVector) vrb1.cols[0];
+
+    lcv0.vector[0] = 5;
+    lcv0.vector[1] = 20;
+    lcv0.vector[2] = 17;
+    lcv0.vector[3] = 15;
+    lcv0.vector[4] = 10;
+
+    lcv0.noNulls = false;
+    lcv0.isNull[0] = true;
+    lcv0.isNull[2] = true;
+
+    expr1.evaluate(vrb1);
+    assertEquals(1, vrb1.size);
+    assertTrue(vrb1.selectedInUse);
+    assertEquals(1, vrb1.selected[0]);
+
+    // With nulls and selected
+    VectorizedRowBatch vrb2 = VectorizedRowGroupGenUtil.getVectorizedRowBatch(
+        7, 2, seed);
+    vrb2.selectedInUse = true;
+    vrb2.selected[0] = 1;
+    vrb2.selected[1] = 2;
+    vrb2.selected[2] = 4;
+    vrb2.size = 3;
+
+    lcv0 = (LongColumnVector) vrb2.cols[0];
+
+    lcv0.vector[0] = 5;
+    lcv0.vector[1] = 20;
+    lcv0.vector[2] = 17;
+    lcv0.vector[3] = 15;
+    lcv0.vector[4] = 10;
+    lcv0.vector[5] = 19;
+    lcv0.vector[6] = 21;
+
+    lcv0.noNulls = false;
+    lcv0.isNull[0] = true;
+    lcv0.isNull[2] = true;
+    lcv0.isNull[5] = true;
+
+    expr1.evaluate(vrb2);
+    assertEquals(1, vrb2.size);
+    assertEquals(1, vrb2.selected[0]);
+
+    // Repeating non null
+    VectorizedRowBatch vrb3 = VectorizedRowGroupGenUtil.getVectorizedRowBatch(
+        7, 2, seed);
+    lcv0 = (LongColumnVector) vrb3.cols[0];
+
+    lcv0.isRepeating = true;
+    lcv0.vector[0] = 5;
+    lcv0.vector[1] = 20;
+    lcv0.vector[2] = 17;
+    lcv0.vector[3] = 15;
+    lcv0.vector[4] = 10;
+
+    expr1.evaluate(vrb3);
+    assertEquals(7, vrb3.size);
+    assertFalse(vrb3.selectedInUse);
+    assertTrue(lcv0.isRepeating);
+
+    // Repeating null
+    lcv0.noNulls = false;
+    lcv0.vector[0] = 5;
+    lcv0.isNull[0] = true;
+
+    expr1.evaluate(vrb3);
+    assertEquals(0, vrb3.size);
+  }
+
+  @Test
+  public void testFilterDoubleIn() {
+    int seed = 17;
+    VectorizedRowBatch vrb = VectorizedRowGroupGenUtil.getVectorizedRowBatch(
+        5, 2, seed);
+    DoubleColumnVector dcv0 = new DoubleColumnVector();
+    vrb.cols[0] = dcv0;
+    double[] inList = {5.0, 20.2};
+    FilterDoubleColumnInList f = new FilterDoubleColumnInList(0);
+    f.setInListValues(inList);
+    VectorExpression expr1 = f;
+
+    // Basic sanity check. Other cases are skipped because the logic is the same as in the Long test above.
+    dcv0.vector[0] = 5.0;
+    dcv0.vector[1] = 20.2;
+    dcv0.vector[2] = 17.0;
+    dcv0.vector[3] = 15.0;
+    dcv0.vector[4] = 10.0;
+
+    expr1.evaluate(vrb);
+
+    assertEquals(2, vrb.size);
+    assertTrue(vrb.selectedInUse);
+    assertEquals(0, vrb.selected[0]);
+    assertEquals(1, vrb.selected[1]);
+  }
+
+  @Test
+  public void testFilterStringIn() {
+    int seed = 17;
+    VectorizedRowBatch vrb = VectorizedRowGroupGenUtil.getVectorizedRowBatch(
+        3, 2, seed);
+    vrb.cols[0] = new BytesColumnVector();
+    BytesColumnVector bcv = (BytesColumnVector) vrb.cols[0];
+
+    bcv.initBuffer();
+    bcv.setVal(0, a, 0, 1);
+    bcv.setVal(1, b, 0, 1);
+    bcv.setVal(2, c, 0, 1);
+
+    VectorExpression expr = new FilterStringColumnInList(0);
+    byte[][] inList = {b, c};
+    ((FilterStringColumnInList) expr).setInListValues(inList);
+
+    // basic test
+    expr.evaluate(vrb);
+
+    assertEquals(2, vrb.size);
+    assertTrue(vrb.selectedInUse);
+    assertEquals(1, vrb.selected[0]);
+    assertEquals(2, vrb.selected[1]);
+
+    // nulls
+    vrb.selectedInUse = false;
+    vrb.size = 3;
+    bcv.noNulls = false;
+    bcv.isNull[2] = true;
+    expr.evaluate(vrb);
+    assertEquals(1, vrb.size);
+    assertEquals(1, vrb.selected[0]);
+    assertTrue(vrb.selectedInUse);
+
+    // repeating
+    vrb.selectedInUse = false;
+    vrb.size = 3;
+    bcv.noNulls = true;
+    bcv.isRepeating = true;
+    expr.evaluate(vrb);
+    assertEquals(0, vrb.size);
+
+    // nulls and repeating
+    vrb.selectedInUse = false;
+    vrb.size = 3;
+    bcv.noNulls = false;
+    bcv.isRepeating = true;
+    bcv.isNull[0] = true;
+    bcv.setVal(0, b, 0, 1);
+    expr.evaluate(vrb);
+    assertEquals(0, vrb.size);
+  }
 }

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java Tue Nov 12 18:23:05 2013
@@ -27,7 +27,6 @@ import java.util.Arrays;
 
 import junit.framework.Assert;
 
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java Tue Nov 12 18:23:05 2013
@@ -175,8 +175,6 @@ public class TestSymlinkTextInputFormat 
       CombineHiveInputFormat combineInputFormat = ReflectionUtils.newInstance(
           CombineHiveInputFormat.class, newJob);
 
-      combineInputFormat.validateInput(newJob);
-
       InputSplit[] retSplits = combineInputFormat.getSplits(newJob, 1);
       assertEquals(1, retSplits.length);
     } catch (Exception e) {

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java Tue Nov 12 18:23:05 2013
@@ -539,6 +539,70 @@ public class TestOrcFile {
     rows.close();
   }
 
+
+  @Test
+  public void testStripeLevelStats() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory.getReflectionObjectInspector
+          (InnerStruct.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+    Writer writer = OrcFile.createWriter(testFilePath,
+        OrcFile.writerOptions(conf)
+            .inspector(inspector)
+            .stripeSize(100000)
+            .bufferSize(10000));
+    for (int i = 0; i < 11000; i++) {
+      if (i >= 5000) {
+        if (i >= 10000) {
+          writer.addRow(new InnerStruct(3, "three"));
+        } else {
+          writer.addRow(new InnerStruct(2, "two"));
+        }
+      } else {
+        writer.addRow(new InnerStruct(1, "one"));
+      }
+    }
+
+    writer.close();
+    Reader reader = OrcFile.createReader(fs, testFilePath);
+    Metadata metadata = reader.getMetadata();
+    int numStripes = metadata.getStripeStatistics().size();
+    assertEquals(3, numStripes);
+    StripeStatistics ss1 = metadata.getStripeStatistics().get(0);
+    StripeStatistics ss2 = metadata.getStripeStatistics().get(1);
+    StripeStatistics ss3 = metadata.getStripeStatistics().get(2);
+    assertEquals(4996, ss1.getColumnStatistics()[0].getNumberOfValues());
+    assertEquals(5000, ss2.getColumnStatistics()[0].getNumberOfValues());
+    assertEquals(1004, ss3.getColumnStatistics()[0].getNumberOfValues());
+
+    assertEquals(4996, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getNumberOfValues());
+    assertEquals(5000, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getNumberOfValues());
+    assertEquals(1004, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getNumberOfValues());
+    assertEquals(1, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getMinimum());
+    assertEquals(1, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMinimum());
+    assertEquals(2, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getMinimum());
+    assertEquals(1, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getMaximum());
+    assertEquals(2, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMaximum());
+    assertEquals(3, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getMaximum());
+    assertEquals(4996, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getSum());
+    assertEquals(9996, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getSum());
+    assertEquals(3008, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getSum());
+
+    assertEquals(4996, ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getNumberOfValues());
+    assertEquals(5000, ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getNumberOfValues());
+    assertEquals(1004, ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getNumberOfValues());
+    assertEquals("one", ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getMinimum());
+    assertEquals("one", ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getMinimum());
+    assertEquals("three", ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getMinimum());
+    assertEquals("one", ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getMaximum());
+    assertEquals("two", ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getMaximum());
+    assertEquals("two", ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getMaximum());
+    assertEquals(14988, ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getSum());
+    assertEquals(15000, ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getSum());
+    assertEquals(5012, ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getSum());
+  }
+
   @Test
   public void test1() throws Exception {
     ObjectInspector inspector;
@@ -547,12 +611,12 @@ public class TestOrcFile {
           (BigRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     Writer writer = OrcFile.createWriter(testFilePath,
-                                         OrcFile.writerOptions(conf)
-                                         .inspector(inspector)
-                                         .stripeSize(100000)
-                                         .bufferSize(10000));
+        OrcFile.writerOptions(conf)
+            .inspector(inspector)
+            .stripeSize(100000)
+            .bufferSize(10000));
     writer.addRow(new BigRow(false, (byte) 1, (short) 1024, 65536,
-        Long.MAX_VALUE, (float) 1.0, -15.0, bytes(0,1,2,3,4), "hi",
+        Long.MAX_VALUE, (float) 1.0, -15.0, bytes(0, 1, 2, 3, 4), "hi",
         new MiddleStruct(inner(1, "bye"), inner(2, "sigh")),
         list(inner(3, "good"), inner(4, "bad")),
         map()));
@@ -560,10 +624,12 @@ public class TestOrcFile {
         Long.MAX_VALUE, (float) 2.0, -5.0, bytes(), "bye",
         new MiddleStruct(inner(1, "bye"), inner(2, "sigh")),
         list(inner(100000000, "cat"), inner(-100000, "in"), inner(1234, "hat")),
-        map(inner(5,"chani"), inner(1,"mauddib"))));
+        map(inner(5, "chani"), inner(1, "mauddib"))));
     writer.close();
     Reader reader = OrcFile.createReader(fs, testFilePath);
 
+    Metadata metadata = reader.getMetadata();
+
     // check the stats
     ColumnStatistics[] stats = reader.getStatistics();
     assertEquals(2, stats[1].getNumberOfValues());
@@ -578,14 +644,12 @@ public class TestOrcFile {
     assertEquals("count: 2 min: 1024 max: 2048 sum: 3072",
         stats[3].toString());
 
-    assertEquals(Long.MAX_VALUE,
-        ((IntegerColumnStatistics) stats[5]).getMaximum());
-    assertEquals(Long.MAX_VALUE,
-        ((IntegerColumnStatistics) stats[5]).getMinimum());
-    assertEquals(false, ((IntegerColumnStatistics) stats[5]).isSumDefined());
-    assertEquals("count: 2 min: 9223372036854775807 max: 9223372036854775807",
-        stats[5].toString());
-
+    StripeStatistics ss = metadata.getStripeStatistics().get(0);
+    assertEquals(2, ss.getColumnStatistics()[0].getNumberOfValues());
+    assertEquals(1, ((BooleanColumnStatistics) ss.getColumnStatistics()[1]).getTrueCount());
+    assertEquals(1024, ((IntegerColumnStatistics) ss.getColumnStatistics()[3]).getMinimum());
+    assertEquals(2048, ((IntegerColumnStatistics) ss.getColumnStatistics()[3]).getMaximum());
+    assertEquals(3072, ((IntegerColumnStatistics) ss.getColumnStatistics()[3]).getSum());
     assertEquals(-15.0, ((DoubleColumnStatistics) stats[7]).getMinimum());
     assertEquals(-5.0, ((DoubleColumnStatistics) stats[7]).getMaximum());
     assertEquals(-20.0, ((DoubleColumnStatistics) stats[7]).getSum(), 0.00001);
@@ -935,6 +999,9 @@ public class TestOrcFile {
       }
     }
     assertEquals(3, i);
+    Metadata metadata = reader.getMetadata();
+    int numStripes = metadata.getStripeStatistics().size();
+    assertEquals(1, numStripes);
   }
 
   /**
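
The new assertions above read stripe-level statistics through the Java reader API that accompanies the protobuf change. A condensed sketch of the access pattern, using only calls that appear in this diff:

    Reader reader = OrcFile.createReader(fs, testFilePath);
    Metadata metadata = reader.getMetadata();
    for (StripeStatistics ss : metadata.getStripeStatistics()) {
      ColumnStatistics[] cols = ss.getColumnStatistics();
      long rows = cols[0].getNumberOfValues();  // column 0 covers the whole row
      IntegerColumnStatistics ints = (IntegerColumnStatistics) cols[1];
      long min = ints.getMinimum();  // per-stripe min/max/sum, as asserted above
      long max = ints.getMaximum();
      long sum = ints.getSum();
    }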

Modified: hive/branches/tez/ql/src/test/queries/clientpositive/correlationoptimizer1.q
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/queries/clientpositive/correlationoptimizer1.q?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/queries/clientpositive/correlationoptimizer1.q (original)
+++ hive/branches/tez/ql/src/test/queries/clientpositive/correlationoptimizer1.q Tue Nov 12 18:23:05 2013
@@ -104,7 +104,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       
 set hive.optimize.correlation=false;
 -- If the key of a GroupByOperator is the right table's key in
--- a Left Outer Join, we cannot use a single MR to execute these two 
+-- a Left Outer Join, we cannot use a single MR to execute these two
 -- operators because those keys with a null value are not grouped.
 EXPLAIN
 SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
@@ -130,6 +130,29 @@ FROM (SELECT y.key AS key, count(1) AS c
       GROUP BY y.key) tmp;
 
 set hive.optimize.correlation=false;
+-- If a column of the key of a GroupByOperator is the right table's key in
+-- a Left Outer Join, we cannot use a single MR to execute these two
+-- operators because those keys with a null value are not grouped.
+EXPLAIN
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value;
+
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value;
+
+set hive.optimize.correlation=true;
+EXPLAIN
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value;
+
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value;
+
+set hive.optimize.correlation=false;
 -- If the key of a GroupByOperator is the right table's key in
 -- a Right Outer Join, these two operators will be executed in
 -- the same MR job when Correlation Optimizer is enabled.

Modified: hive/branches/tez/ql/src/test/queries/clientpositive/ctas_hadoop20.q
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/queries/clientpositive/ctas_hadoop20.q?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/queries/clientpositive/ctas_hadoop20.q (original)
+++ hive/branches/tez/ql/src/test/queries/clientpositive/ctas_hadoop20.q Tue Nov 12 18:23:05 2013
@@ -58,11 +58,6 @@ create table nzhang_ctas6 (key string, `
 insert overwrite table nzhang_ctas6 select key, value from src limit 10;
 create table nzhang_ctas7 as select key, `to` from nzhang_ctas6;
 
-
-
-
-
-
-
-
-
+create table nzhang_ctas8 as select 3.14BD from nzhang_ctas6 limit 1;
+desc nzhang_ctas8;
+drop table nzhang_ctas8;

Modified: hive/branches/tez/ql/src/test/queries/clientpositive/limit_pushdown.q
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/queries/clientpositive/limit_pushdown.q?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/queries/clientpositive/limit_pushdown.q (original)
+++ hive/branches/tez/ql/src/test/queries/clientpositive/limit_pushdown.q Tue Nov 12 18:23:05 2013
@@ -22,12 +22,17 @@ select value,avg(key + 1) from src group
 
 -- distincts
 explain
-select distinct(key) from src limit 20;
-select distinct(key) from src limit 20;
+select distinct(cdouble) from alltypesorc limit 20;
+select distinct(cdouble) from alltypesorc limit 20;
 
 explain
-select key, count(distinct(key)) from src group by key limit 20;
-select key, count(distinct(key)) from src group by key limit 20;
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20;
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20;
+
+-- multi distinct
+explain
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint limit 20;
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint limit 20;
 
 -- limit zero
 explain

Modified: hive/branches/tez/ql/src/test/queries/clientpositive/limit_pushdown_negative.q
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/queries/clientpositive/limit_pushdown_negative.q?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/queries/clientpositive/limit_pushdown_negative.q (original)
+++ hive/branches/tez/ql/src/test/queries/clientpositive/limit_pushdown_negative.q Tue Nov 12 18:23:05 2013
@@ -16,7 +16,3 @@ CREATE TABLE dest_3(key STRING, c1 INT);
 EXPLAIN FROM src
 INSERT OVERWRITE TABLE dest_2 SELECT value, sum(key) GROUP BY value
 INSERT OVERWRITE TABLE dest_3 SELECT value, sum(key) GROUP BY value limit 20;
-
--- nagative, multi distinct
-explain
-select count(distinct key)+count(distinct value) from src limit 20;

Modified: hive/branches/tez/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q (original)
+++ hive/branches/tez/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q Tue Nov 12 18:23:05 2013
@@ -49,6 +49,16 @@ SET hive.optimize.index.filter=false;
 -- hive.optimize.index.filter is set to true. the explain plan should show filter expression
 -- in table scan operator.
 
+SELECT * FROM orc_pred WHERE t<2 limit 1;
+SET hive.optimize.index.filter=true;
+SELECT * FROM orc_pred WHERE t<2 limit 1;
+SET hive.optimize.index.filter=false;
+
+SELECT * FROM orc_pred WHERE t>2 limit 1;
+SET hive.optimize.index.filter=true;
+SELECT * FROM orc_pred WHERE t>2 limit 1;
+SET hive.optimize.index.filter=false;
+
 SELECT SUM(HASH(t)) FROM orc_pred
   WHERE t IS NOT NULL
   AND t < 0

Modified: hive/branches/tez/ql/src/test/queries/clientpositive/stats_only_null.q
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/queries/clientpositive/stats_only_null.q?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/queries/clientpositive/stats_only_null.q (original)
+++ hive/branches/tez/ql/src/test/queries/clientpositive/stats_only_null.q Tue Nov 12 18:23:05 2013
@@ -9,9 +9,9 @@ CREATE TABLE stats_null_part(a double, b
 LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE temps_null;
 
 insert overwrite table stats_null select * from temps_null;
-insert overwrite table stats_null_part partition(dt='2010') select * from temps_null where d <=5;
+insert into table stats_null_part partition(dt='2010') select * from temps_null where d <=5;
 
-insert overwrite table stats_null_part partition(dt='2011') select * from temps_null where d > 5;
+insert into table stats_null_part partition(dt='2011') select * from temps_null where d > 5;
 explain 
 select count(*), count(a), count(b), count(c), count(d) from stats_null;
 explain 
@@ -22,6 +22,9 @@ analyze table stats_null compute statist
 analyze table stats_null_part partition(dt='2010') compute statistics for columns a,b,c,d;
 analyze table stats_null_part partition(dt='2011') compute statistics for columns a,b,c,d;
 
+describe formatted stats_null_part partition (dt='2010');
+describe formatted stats_null_part partition (dt='2011');
+
 explain 
 select count(*), count(a), count(b), count(c), count(d) from stats_null;
 explain 

Modified: hive/branches/tez/ql/src/test/resources/orc-file-dump-dictionary-threshold.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/resources/orc-file-dump-dictionary-threshold.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/resources/orc-file-dump-dictionary-threshold.out (original)
+++ hive/branches/tez/ql/src/test/resources/orc-file-dump-dictionary-threshold.out Tue Nov 12 18:23:05 2013
@@ -4,7 +4,34 @@ Compression: ZLIB
 Compression size: 10000
 Type: struct<i:int,l:bigint,s:string>
 
-Statistics:
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 4000
+    Column 1: count: 4000 min: -2147115959 max: 2145911404 sum: 71315665983
+    Column 2: count: 4000 min: -9211329013123260308 max: 9217851628057711416
+    Column 3: count: 4000 min: Darkness,-230 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788 sum: 245096
+  Stripe 2:
+    Column 0: count: 5000
+    Column 1: count: 5000 min: -2147390285 max: 2146838901 sum: 107869424275
+    Column 2: count: 5000 min: -9222178666167296739 max: 9221301751385928177
+    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984 sum: 972748
+  Stripe 3:
+    Column 0: count: 5000
+    Column 1: count: 5000 min: -2145928262 max: 2147224606 sum: 38276585043
+    Column 2: count: 5000 min: -9221963099397084326 max: 9222722740629726770
+    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766 sum: 1753024
+  Stripe 4:
+    Column 0: count: 5000
+    Column 1: count: 5000 min: -2145378214 max: 2147453086 sum: -43469576640
+    Column 2: count: 5000 min: -9222731174895935707 max: 9222919052987871506
+    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788 sum: 2636664
+  Stripe 5:
+    Column 0: count: 2000
+    Column 1: count: 2000 min: -2143595397 max: 2144595861 sum: -64863580335
+    Column 2: count: 2000 min: -9212379634781416464 max: 9208134757538374043
+    Column 3: count: 2000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 sum: 1302706
+
+File Statistics:
   Column 0: count: 21000
   Column 1: count: 21000 min: -2147390285 max: 2147453086 sum: 109128518326
   Column 2: count: 21000 min: -9222731174895935707 max: 9222919052987871506

Modified: hive/branches/tez/ql/src/test/resources/orc-file-dump.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/resources/orc-file-dump.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/resources/orc-file-dump.out (original)
+++ hive/branches/tez/ql/src/test/resources/orc-file-dump.out Tue Nov 12 18:23:05 2013
@@ -4,7 +4,34 @@ Compression: ZLIB
 Compression size: 10000
 Type: struct<i:int,l:bigint,s:string>
 
-Statistics:
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 5000
+    Column 1: count: 5000 min: -2146021688 max: 2147223299 sum: 515792826
+    Column 2: count: 5000 min: -9218592812243954469 max: 9221614132680747961
+    Column 3: count: 5000 min: Darkness, max: worst sum: 19280
+  Stripe 2:
+    Column 0: count: 5000
+    Column 1: count: 5000 min: -2146733128 max: 2147001622 sum: 7673427
+    Column 2: count: 5000 min: -9220818777591257749 max: 9222259462014003839
+    Column 3: count: 5000 min: Darkness, max: worst sum: 19504
+  Stripe 3:
+    Column 0: count: 5000
+    Column 1: count: 5000 min: -2146993718 max: 2147378179 sum: 132660742551
+    Column 2: count: 5000 min: -9218342074710552826 max: 9222303228623055266
+    Column 3: count: 5000 min: Darkness, max: worst sum: 19641
+  Stripe 4:
+    Column 0: count: 5000
+    Column 1: count: 5000 min: -2146658006 max: 2145520931 sum: 8533549236
+    Column 2: count: 5000 min: -9222758097219661129 max: 9221043130193737406
+    Column 3: count: 5000 min: Darkness, max: worst sum: 19470
+  Stripe 5:
+    Column 0: count: 1000
+    Column 1: count: 1000 min: -2146245500 max: 2146378640 sum: 51299706363
+    Column 2: count: 1000 min: -9208193203370316142 max: 9218567213558056476
+    Column 3: count: 1000 min: Darkness, max: worst sum: 3866
+
+File Statistics:
   Column 0: count: 21000
   Column 1: count: 21000 min: -2146993718 max: 2147378179 sum: 193017464403
   Column 2: count: 21000 min: -9222758097219661129 max: 9222303228623055266

Modified: hive/branches/tez/ql/src/test/results/beelinepositive/udf_round.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/beelinepositive/udf_round.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/beelinepositive/udf_round.q.out (original)
+++ hive/branches/tez/ql/src/test/results/beelinepositive/udf_round.q.out Tue Nov 12 18:23:05 2013
@@ -16,7 +16,7 @@ Saving all output to "!!{outputDirectory
 round(1.0/0.0, 0), round(power(-1.0,0.5), 0) 
 FROM src LIMIT 1;
 '_c0','_c1','_c2','_c3','_c4'
-'','','','Infinity','NaN'
+'','','','','NaN'
 1 row selected 
 >>>  
 >>>  SELECT 

Modified: hive/branches/tez/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out Tue Nov 12 18:23:05 2013
@@ -71,7 +71,10 @@ Table:              	analyze_srcpart_par
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	5293                
 #### A masked pattern was here ####
 	 	 

Modified: hive/branches/tez/ql/src/test/results/clientnegative/unset_table_property.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientnegative/unset_table_property.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientnegative/unset_table_property.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientnegative/unset_table_property.q.out Tue Nov 12 18:23:05 2013
@@ -21,8 +21,11 @@ numFiles	0
 c	3
 #### A masked pattern was here ####
 a	1
+COLUMN_STATS_ACCURATE	false
 #### A masked pattern was here ####
+numRows	-1
 totalSize	0
+rawDataSize	-1
 FAILED: SemanticException [Error 10215]: Please use the following syntax if not sure whether the property existed or not:
 ALTER TABLE tableName UNSET TBLPROPERTIES IF EXISTS (key1, key2, ...)
  The following property z does not exist in testtable

Modified: hive/branches/tez/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out Tue Nov 12 18:23:05 2013
@@ -79,6 +79,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -147,6 +148,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -280,6 +282,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -372,10 +375,11 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
 	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	5812                
 #### A masked pattern was here ####
 	 	 

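The alter_numbuckets diffs show both sides of the flag: partitions whose stats were gathered at load time now carry COLUMN_STATS_ACCURATE=true next to their real numRows/rawDataSize, and the last hunk shows the flag flipping to false (counters reset to -1) once the partition is altered. Rerunning the stats gatherer should restore the accurate state; a sketch, with an illustrative partition spec:

    ANALYZE TABLE tst1 PARTITION (ds='1') COMPUTE STATISTICS;
    -- DESCRIBE FORMATTED tst1 PARTITION (ds='1') should then report
    -- COLUMN_STATS_ACCURATE=true with numRows/rawDataSize repopulated.
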
Modified: hive/branches/tez/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out Tue Nov 12 18:23:05 2013
@@ -77,6 +77,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -179,6 +180,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -289,6 +291,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -407,6 +410,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -533,6 +537,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -667,6 +672,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -809,6 +815,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -959,6 +966,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -1117,6 +1125,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                

Modified: hive/branches/tez/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out Tue Nov 12 18:23:05 2013
@@ -45,8 +45,11 @@ Table:              	alter_table_partiti
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -95,8 +98,11 @@ Table:              	alter_table_partiti
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -145,8 +151,11 @@ Table:              	alter_table_partiti
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 

Modified: hive/branches/tez/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/alter_partition_coltype.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/alter_partition_coltype.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/alter_partition_coltype.q.out Tue Nov 12 18:23:05 2013
@@ -138,6 +138,7 @@ STAGE PLANS:
               dt 100x
               ts 6:30pm
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -333,6 +334,7 @@ STAGE PLANS:
               dt 100x
               ts 6:30pm
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -486,6 +488,7 @@ STAGE PLANS:
               dt 10
               ts 3.0
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -527,6 +530,7 @@ STAGE PLANS:
               dt 100x
               ts 3.0
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -568,6 +572,7 @@ STAGE PLANS:
               dt 100x
               ts 6:30pm
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -808,6 +813,7 @@ STAGE PLANS:
               dt 10
               ts 3.0
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -849,6 +855,7 @@ STAGE PLANS:
               dt 100x
               ts 3.0
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -890,6 +897,7 @@ STAGE PLANS:
               dt 100x
               ts 6:30pm
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -1026,6 +1034,7 @@ STAGE PLANS:
               dt 10
               ts 3.0
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -1067,6 +1076,7 @@ STAGE PLANS:
               dt 100x
               ts 3.0
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string
@@ -1108,6 +1118,7 @@ STAGE PLANS:
               dt 100x
               ts 6:30pm
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns key,value
               columns.types string:string

Modified: hive/branches/tez/ql/src/test/results/clientpositive/alter_skewed_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/alter_skewed_table.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/alter_skewed_table.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/alter_skewed_table.q.out Tue Nov 12 18:23:05 2013
@@ -57,8 +57,11 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -141,8 +144,11 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -227,8 +233,11 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 

Modified: hive/branches/tez/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out Tue Nov 12 18:23:05 2013
@@ -58,9 +58,12 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	false               
 	SORTBUCKETCOLSPREFIX	TRUE                
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 

Modified: hive/branches/tez/ql/src/test/results/clientpositive/alter_table_serde2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/alter_table_serde2.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/alter_table_serde2.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/alter_table_serde2.q.out Tue Nov 12 18:23:05 2013
@@ -77,6 +77,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -182,6 +183,7 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                

Modified: hive/branches/tez/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out Tue Nov 12 18:23:05 2013
@@ -153,6 +153,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns dealid,date,time,cityid,userid
               columns.types int:string:string:int:int
@@ -171,6 +172,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE true
                 bucket_count -1
                 columns dealid,date,time,cityid,userid
                 columns.types int:string:string:int:int
@@ -284,6 +286,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns dealid,date,time,cityid,userid
               columns.types int:string:string:int:int
@@ -302,6 +305,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE true
                 bucket_count -1
                 columns dealid,date,time,cityid,userid
                 columns.types int:string:string:int:int
@@ -413,6 +417,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns dealid,date,time,cityid,userid
               columns.types int:string:string:int:int
@@ -431,6 +436,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE true
                 bucket_count -1
                 columns dealid,date,time,cityid,userid
                 columns.types int:string:string:int:int
@@ -540,6 +546,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns userid
               columns.types int
@@ -558,6 +565,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE true
                 bucket_count -1
                 columns userid
                 columns.types int

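From this point on the same flag also appears in query plans: EXPLAIN EXTENDED serializes each input partition's properties into the stage plan, so COLUMN_STATS_ACCURATE true now shows up alongside bucket_count, columns and the other descriptor fields. A sketch of how to observe it (the query is illustrative; the tests below use multi-way joins over bucketed, partitioned tables):

    EXPLAIN EXTENDED
    SELECT count(1) FROM src;
    -- in each Path -> Partition descriptor of the output, look for:
    --   properties:
    --     COLUMN_STATS_ACCURATE true
    --     bucket_count -1
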
Modified: hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out Tue Nov 12 18:23:05 2013
@@ -133,6 +133,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -176,6 +177,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -324,6 +326,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -367,6 +370,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -494,6 +498,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-08
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -588,6 +593,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -631,6 +637,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -673,6 +680,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -759,6 +767,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-08
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 4
                     bucket_field_name key
                     columns key,value
@@ -801,6 +810,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-09
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 4
                     bucket_field_name key
                     columns key,value
@@ -895,6 +905,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -938,6 +949,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -980,6 +992,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -1093,6 +1106,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -1136,6 +1150,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value

Modified: hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out Tue Nov 12 18:23:05 2013
@@ -102,6 +102,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-08
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -195,6 +196,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -237,6 +239,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -279,6 +282,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -407,6 +411,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-08
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -500,6 +505,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -542,6 +548,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -584,6 +591,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -705,6 +713,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-08
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -806,6 +815,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -848,6 +858,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value

Modified: hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out Tue Nov 12 18:23:05 2013
@@ -130,6 +130,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-08
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -176,6 +177,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-08
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 3
                     bucket_field_name key
                     columns key,value
@@ -222,6 +224,7 @@ STAGE PLANS:
                   partition values:
                     ds 2008-04-08
                   properties:
+                    COLUMN_STATS_ACCURATE true
                     bucket_count 3
                     bucket_field_name key
                     columns key,value
@@ -360,6 +363,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -403,6 +407,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-09
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -446,6 +451,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 3
               bucket_field_name key
               columns key,value
@@ -489,6 +495,7 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
+              COLUMN_STATS_ACCURATE true
               bucket_count 2
               bucket_field_name key
               columns key,value