Posted to commits@hive.apache.org by br...@apache.org on 2014/09/08 06:38:26 UTC

svn commit: r1623263 [26/28] - in /hive/branches/spark: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/ ant/src/org/apache/hadoop/hive/ant/ beeline/src/java/org/apache/hive/beeline/ beeline/src/test/org/apache/hive/beeline/ bin/...

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Mon Sep  8 04:38:17 2014
@@ -67,6 +67,7 @@ import org.apache.hadoop.hive.ql.io.Comb
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.InputFormatChecker;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
 import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -1586,7 +1587,7 @@ public class TestInputOutputFormat {
     types.add(builder.build());
     types.add(builder.build());
     types.add(builder.build());
-    SearchArgument isNull = SearchArgument.FACTORY.newBuilder()
+    SearchArgument isNull = SearchArgumentFactory.newBuilder()
         .startAnd().isNull("cost").end().build();
     conf.set(OrcInputFormat.SARG_PUSHDOWN, isNull.toKryo());
     conf.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR,
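
The hunk above replaces the SearchArgument.FACTORY singleton with the static SearchArgumentFactory entry point. As a minimal sketch of the new call pattern, using only names that appear in this commit (the column "cost" and the pushdown key come from the test itself):

    import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
    import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;

    // Build an IS NULL predicate through the new static factory;
    // SearchArgument.FACTORY is no longer the entry point.
    SearchArgument isNull = SearchArgumentFactory.newBuilder()
        .startAnd().isNull("cost").end().build();
    // Serialize it for pushdown exactly as the test does.
    conf.set(OrcInputFormat.SARG_PUSHDOWN, isNull.toKryo());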

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java Mon Sep  8 04:38:17 2014
@@ -42,7 +42,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.io.orc.OrcFile.Version;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
@@ -1684,7 +1686,7 @@ public class TestOrcFile {
   }
 
   @Test
-  public void testMemoryManagement() throws Exception {
+  public void testMemoryManagementV11() throws Exception {
     ObjectInspector inspector;
     synchronized (TestOrcFile.class) {
       inspector = ObjectInspectorFactory.getReflectionObjectInspector
@@ -1699,7 +1701,8 @@ public class TestOrcFile {
                                          .stripeSize(50000)
                                          .bufferSize(100)
                                          .rowIndexStride(0)
-                                         .memory(memory));
+                                         .memory(memory)
+                                         .version(Version.V_0_11));
     assertEquals(testFilePath, memory.path);
     for(int i=0; i < 2500; ++i) {
       writer.addRow(new InnerStruct(i*300, Integer.toHexString(10*i)));
@@ -1719,6 +1722,45 @@ public class TestOrcFile {
   }
 
   @Test
+  public void testMemoryManagementV12() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory.getReflectionObjectInspector
+          (InnerStruct.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+    MyMemoryManager memory = new MyMemoryManager(conf, 10000, 0.1);
+    Writer writer = OrcFile.createWriter(testFilePath,
+                                         OrcFile.writerOptions(conf)
+                                         .inspector(inspector)
+                                         .compress(CompressionKind.NONE)
+                                         .stripeSize(50000)
+                                         .bufferSize(100)
+                                         .rowIndexStride(0)
+                                         .memory(memory)
+                                         .version(Version.V_0_12));
+    assertEquals(testFilePath, memory.path);
+    for(int i=0; i < 2500; ++i) {
+      writer.addRow(new InnerStruct(i*300, Integer.toHexString(10*i)));
+    }
+    writer.close();
+    assertEquals(null, memory.path);
+    Reader reader = OrcFile.createReader(testFilePath,
+        OrcFile.readerOptions(conf).filesystem(fs));
+    int i = 0;
+    for(StripeInformation stripe: reader.getStripes()) {
+      i += 1;
+      assertTrue("stripe " + i + " is too long at " + stripe.getDataLength(),
+          stripe.getDataLength() < 5000);
+    }
+    // with HIVE-7832, the dictionaries will be disabled after writing the first
+    // stripe as there are too many distinct values. Hence only 3 stripes as
+    // compared to 25 stripes in version 0.11 (above test case)
+    assertEquals(3, i);
+    assertEquals(2500, reader.getNumberOfRows());
+  }
+
+  @Test
   public void testPredicatePushdown() throws Exception {
     ObjectInspector inspector;
     synchronized (TestOrcFile.class) {
@@ -1736,7 +1778,7 @@ public class TestOrcFile {
         OrcFile.readerOptions(conf).filesystem(fs));
     assertEquals(3500, reader.getNumberOfRows());
 
-    SearchArgument sarg = SearchArgument.FACTORY.newBuilder()
+    SearchArgument sarg = SearchArgumentFactory.newBuilder()
         .startAnd()
           .startNot()
              .lessThan("int1", 300000)
@@ -1760,7 +1802,7 @@ public class TestOrcFile {
     assertEquals(3500, rows.getRowNumber());
 
     // look through the file with no rows selected
-    sarg = SearchArgument.FACTORY.newBuilder()
+    sarg = SearchArgumentFactory.newBuilder()
         .startAnd()
           .lessThan("int1", 0)
         .end()
@@ -1773,7 +1815,7 @@ public class TestOrcFile {
     assertTrue(!rows.hasNext());
 
     // select first 100 and last 100 rows
-    sarg = SearchArgument.FACTORY.newBuilder()
+    sarg = SearchArgumentFactory.newBuilder()
         .startOr()
           .lessThan("int1", 300 * 100)
           .startNot()
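
The memory-management test is split into V11 and V12 variants above, differing only in the version(...) writer option. A sketch of pinning the ORC file-format version (conf, testFilePath, inspector, and memory stand in for the test's local state):

    import org.apache.hadoop.hive.ql.io.orc.CompressionKind;
    import org.apache.hadoop.hive.ql.io.orc.OrcFile;
    import org.apache.hadoop.hive.ql.io.orc.OrcFile.Version;
    import org.apache.hadoop.hive.ql.io.orc.Writer;

    // V_0_11 keeps dictionary encoding on for the whole file; with
    // HIVE-7832, V_0_12 can disable dictionaries after the first stripe
    // when a column has too many distinct values, which is why the V12
    // test above expects 3 stripes instead of 25.
    Writer writer = OrcFile.createWriter(testFilePath,
        OrcFile.writerOptions(conf)
            .inspector(inspector)
            .compress(CompressionKind.NONE)
            .stripeSize(50000)
            .bufferSize(100)
            .rowIndexStride(0)
            .memory(memory)
            .version(Version.V_0_12));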

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java Mon Sep  8 04:38:17 2014
@@ -56,6 +56,7 @@ import java.util.List;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
 
 public class TestOrcRawRecordMerger {
 
@@ -454,9 +455,16 @@ public class TestOrcRawRecordMerger {
 
   static class MyRow {
     Text col1;
+    RecordIdentifier ROW__ID;
+
     MyRow(String val) {
       col1 = new Text(val);
     }
+
+    MyRow(String val, long rowId, long origTxn, int bucket) {
+      col1 = new Text(val);
+      ROW__ID = new RecordIdentifier(origTxn, bucket, rowId);
+    }
   }
 
   static String getValue(OrcStruct event) {
@@ -533,12 +541,12 @@ public class TestOrcRawRecordMerger {
 
     // write a delta
     ru = of.getRecordUpdater(root, options.writingBase(false)
-        .minimumTransactionId(200).maximumTransactionId(200));
-    ru.update(200, 0, 0, new MyRow("update 1"));
-    ru.update(200, 0, 2, new MyRow("update 2"));
-    ru.update(200, 0, 3, new MyRow("update 3"));
-    ru.delete(200, 0, 7);
-    ru.delete(200, 0, 8);
+        .minimumTransactionId(200).maximumTransactionId(200).recordIdColumn(1));
+    ru.update(200, new MyRow("update 1", 0, 0, BUCKET));
+    ru.update(200, new MyRow("update 2", 2, 0, BUCKET));
+    ru.update(200, new MyRow("update 3", 3, 0, BUCKET));
+    ru.delete(200, new MyRow("", 7, 0, BUCKET));
+    ru.delete(200, new MyRow("", 8, 0, BUCKET));
     ru.close(false);
 
     ValidTxnList txnList = new ValidTxnListImpl("200:");
@@ -607,13 +615,13 @@ public class TestOrcRawRecordMerger {
     assertEquals(OrcRecordUpdater.DELETE_OPERATION,
         OrcRecordUpdater.getOperation(event));
     assertEquals(new ReaderKey(0, BUCKET, 7, 200), id);
-    assertEquals(null, OrcRecordUpdater.getRow(event));
+    assertNull(OrcRecordUpdater.getRow(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.DELETE_OPERATION,
         OrcRecordUpdater.getOperation(event));
     assertEquals(new ReaderKey(0, BUCKET, 8, 200), id);
-    assertEquals(null, OrcRecordUpdater.getRow(event));
+    assertNull(OrcRecordUpdater.getRow(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
@@ -693,7 +701,7 @@ public class TestOrcRawRecordMerger {
     assertEquals(OrcRecordUpdater.DELETE_OPERATION,
         OrcRecordUpdater.getOperation(event));
     assertEquals(new ReaderKey(0, BUCKET, 7, 200), id);
-    assertEquals(null, OrcRecordUpdater.getRow(event));
+    assertNull(OrcRecordUpdater.getRow(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
@@ -705,8 +713,7 @@ public class TestOrcRawRecordMerger {
     assertEquals(OrcRecordUpdater.DELETE_OPERATION,
         OrcRecordUpdater.getOperation(event));
     assertEquals(new ReaderKey(0, BUCKET, 8, 200), id);
-    assertEquals(null, OrcRecordUpdater.getRow(event));
-
+    assertNull(OrcRecordUpdater.getRow(event));
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
@@ -747,6 +754,7 @@ public class TestOrcRawRecordMerger {
     Text mytext;
     float myfloat;
     double mydouble;
+    RecordIdentifier ROW__ID;
 
     BigRow(int myint, long mylong, String mytext, float myfloat, double mydouble) {
       this.myint = myint;
@@ -754,6 +762,21 @@ public class TestOrcRawRecordMerger {
       this.mytext = new Text(mytext);
       this.myfloat = myfloat;
       this.mydouble = mydouble;
+      ROW__ID = null;
+    }
+
+    BigRow(int myint, long mylong, String mytext, float myfloat, double mydouble,
+                    long rowId, long origTxn, int bucket) {
+      this.myint = myint;
+      this.mylong = mylong;
+      this.mytext = new Text(mytext);
+      this.myfloat = myfloat;
+      this.mydouble = mydouble;
+      ROW__ID = new RecordIdentifier(origTxn, bucket, rowId);
+    }
+
+    BigRow(long rowId, long origTxn, int bucket) {
+      ROW__ID = new RecordIdentifier(origTxn, bucket, rowId);
     }
   }
 
@@ -802,16 +825,16 @@ public class TestOrcRawRecordMerger {
     // write a delta
     AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
         .writingBase(false).minimumTransactionId(1).maximumTransactionId(1)
-        .bucket(BUCKET).inspector(inspector).filesystem(fs);
+        .bucket(BUCKET).inspector(inspector).filesystem(fs).recordIdColumn(5);
     RecordUpdater ru = of.getRecordUpdater(root, options);
     values = new String[]{"0.0", null, null, "1.1", null, null, null,
         "ignore.7"};
     for(int i=0; i < values.length; ++i) {
       if (values[i] != null) {
-        ru.update(1, 0, i, new BigRow(i, i, values[i], i, i));
+        ru.update(1, new BigRow(i, i, values[i], i, i, i, 0, BUCKET));
       }
     }
-    ru.delete(100, 0, 9);
+    ru.delete(100, new BigRow(9, 0, BUCKET));
     ru.close(false);
 
     // write a delta
@@ -820,10 +843,10 @@ public class TestOrcRawRecordMerger {
     values = new String[]{null, null, "1.0", null, null, null, null, "3.1"};
     for(int i=0; i < values.length; ++i) {
       if (values[i] != null) {
-        ru.update(2, 0, i, new BigRow(i, i, values[i], i, i));
+        ru.update(2, new BigRow(i, i, values[i], i, i, i, 0, BUCKET));
       }
     }
-    ru.delete(100, 0, 8);
+    ru.delete(100, new BigRow(8, 0, BUCKET));
     ru.close(false);
 
     InputFormat inf = new OrcInputFormat();
@@ -902,16 +925,16 @@ public class TestOrcRawRecordMerger {
     ru.close(false);
 
     // write a delta
-    options.writingBase(false).minimumTransactionId(1).maximumTransactionId(1);
+    options.writingBase(false).minimumTransactionId(1).maximumTransactionId(1).recordIdColumn(5);
     ru = of.getRecordUpdater(root, options);
     values = new String[]{"0.0", null, null, "1.1", null, null, null,
         "ignore.7"};
     for(int i=0; i < values.length; ++i) {
       if (values[i] != null) {
-        ru.update(1, 0, i, new BigRow(i, i, values[i], i, i));
+        ru.update(1, new BigRow(i, i, values[i], i, i, i, 0, BUCKET));
       }
     }
-    ru.delete(100, 0, 9);
+    ru.delete(100, new BigRow(9, 0, BUCKET));
     ru.close(false);
 
     // write a delta
@@ -920,10 +943,10 @@ public class TestOrcRawRecordMerger {
     values = new String[]{null, null, "1.0", null, null, null, null, "3.1"};
     for(int i=0; i < values.length; ++i) {
       if (values[i] != null) {
-        ru.update(2, 0, i, new BigRow(i, i, values[i], i, i));
+        ru.update(2, new BigRow(i, i, values[i], i, i, i, 0, BUCKET));
       }
     }
-    ru.delete(100, 0, 8);
+    ru.delete(100, new BigRow(8, 0, BUCKET));
     ru.close(false);
 
     InputFormat inf = new OrcInputFormat();
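
Throughout this file the RecordUpdater calls change shape: update() and delete() no longer take (transaction, bucket, rowId) scalars; the row object itself now carries a RecordIdentifier in its ROW__ID field, and recordIdColumn(...) tells the writer which struct column that is. A sketch of the new pattern, abbreviated to the changed options and reusing the test's MyRow, BUCKET, of, and root:

    import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
    import org.apache.hadoop.hive.ql.io.RecordUpdater;

    // Column 1 of MyRow is its RecordIdentifier field (ROW__ID).
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .writingBase(false)
        .minimumTransactionId(200)
        .maximumTransactionId(200)
        .recordIdColumn(1);
    RecordUpdater ru = of.getRecordUpdater(root, options);
    // The target row id now rides inside the row being written.
    ru.update(200, new MyRow("update 1", 0 /* rowId */, 0 /* origTxn */, BUCKET));
    ru.delete(200, new MyRow("", 7 /* rowId */, 0 /* origTxn */, BUCKET));
    ru.close(false);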

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java Mon Sep  8 04:38:17 2014
@@ -23,8 +23,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
-import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.io.IntWritable;
@@ -37,6 +37,7 @@ import java.io.DataInputStream;
 import java.io.File;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 
 public class TestOrcRecordUpdater {
 
@@ -64,9 +65,18 @@ public class TestOrcRecordUpdater {
 
   static class MyRow {
     Text field;
+    RecordIdentifier ROW__ID;
+
     MyRow(String val) {
       field = new Text(val);
+      ROW__ID = null;
+    }
+
+    MyRow(String val, long rowId, long origTxn, int bucket) {
+      field = new Text(val);
+      ROW__ID = new RecordIdentifier(origTxn, bucket, rowId);
     }
+
   }
 
   @Test
@@ -178,17 +188,19 @@ public class TestOrcRecordUpdater {
       inspector = ObjectInspectorFactory.getReflectionObjectInspector
           (MyRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
+    int bucket = 20;
     AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
         .filesystem(fs)
-        .bucket(20)
+        .bucket(bucket)
         .writingBase(false)
         .minimumTransactionId(100)
         .maximumTransactionId(100)
         .inspector(inspector)
-        .reporter(Reporter.NULL);
+        .reporter(Reporter.NULL)
+        .recordIdColumn(1);
     RecordUpdater updater = new OrcRecordUpdater(root, options);
-    updater.update(100, 10, 30, new MyRow("update"));
-    updater.delete(100, 40, 60);
+    updater.update(100, new MyRow("update", 30, 10, bucket));
+    updater.delete(100, new MyRow("", 60, 40, bucket));
     assertEquals(-1L, updater.getStats().getRowCount());
     updater.close(false);
     Path bucketPath = AcidUtils.createFilename(root, options);
@@ -216,7 +228,7 @@ public class TestOrcRecordUpdater {
     assertEquals(40, OrcRecordUpdater.getOriginalTransaction(row));
     assertEquals(20, OrcRecordUpdater.getBucket(row));
     assertEquals(60, OrcRecordUpdater.getRowId(row));
-    assertEquals(null, OrcRecordUpdater.getRow(row));
+    assertNull(OrcRecordUpdater.getRow(row));
     assertEquals(false, rows.hasNext());
   }
 }
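
The same ROW__ID convention appears here: the row class exposes the record identifier as an ordinary field so the reflection-based ObjectInspector surfaces it as a struct column, and recordIdColumn(1) above names it by position. A sketch of the pattern, with field order mattering because the option refers to the column index:

    import org.apache.hadoop.hive.ql.io.RecordIdentifier;
    import org.apache.hadoop.io.Text;

    static class MyRow {
      Text field;                // column 0: the payload
      RecordIdentifier ROW__ID;  // column 1: (origTxn, bucket, rowId)

      MyRow(String val, long rowId, long origTxn, int bucket) {
        field = new Text(val);
        ROW__ID = new RecordIdentifier(origTxn, bucket, rowId);
      }
    }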

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java Mon Sep  8 04:38:17 2014
@@ -41,7 +41,6 @@ import org.apache.hadoop.hive.ql.io.sarg
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;
 import org.apache.hadoop.hive.ql.io.sarg.TestSearchArgumentImpl;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
-import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.junit.Test;
 import org.mockito.MockSettings;

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java Mon Sep  8 04:38:17 2014
@@ -22,6 +22,8 @@ import com.google.common.collect.Sets;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl.ExpressionBuilder;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl.ExpressionTree;
@@ -743,7 +745,7 @@ public class TestSearchArgumentImpl {
         "</java> \n";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(9, leaves.size());
 
@@ -1011,7 +1013,7 @@ public class TestSearchArgumentImpl {
         "</java> \n";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(4, leaves.size());
 
@@ -1430,7 +1432,7 @@ public class TestSearchArgumentImpl {
         "</java> \n";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(3, leaves.size());
 
@@ -1640,7 +1642,7 @@ public class TestSearchArgumentImpl {
         "\n";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(3, leaves.size());
 
@@ -1895,7 +1897,7 @@ public class TestSearchArgumentImpl {
         "</java> \n";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(1, leaves.size());
 
@@ -2372,7 +2374,7 @@ public class TestSearchArgumentImpl {
         "</java>";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(9, leaves.size());
 
@@ -2506,7 +2508,7 @@ public class TestSearchArgumentImpl {
         "</java> ";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(0, leaves.size());
 
@@ -2633,7 +2635,7 @@ public class TestSearchArgumentImpl {
         "</java> ";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(0, leaves.size());
 
@@ -2758,7 +2760,7 @@ public class TestSearchArgumentImpl {
         "</java>";
 
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) SearchArgument.FACTORY.create(getFuncDesc(exprStr));
+        (SearchArgumentImpl) SearchArgumentFactory.create(getFuncDesc(exprStr));
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(1, leaves.size());
 
@@ -2788,7 +2790,7 @@ public class TestSearchArgumentImpl {
   @Test
   public void testBuilder() throws Exception {
     SearchArgument sarg =
-        SearchArgument.FACTORY.newBuilder()
+        SearchArgumentFactory.newBuilder()
             .startAnd()
               .lessThan("x", 10)
               .lessThanEquals("y", "hi")
@@ -2799,7 +2801,7 @@ public class TestSearchArgumentImpl {
         "leaf-1 = (LESS_THAN_EQUALS y hi)\n" +
         "leaf-2 = (EQUALS z 1.0)\n" +
         "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString());
-    sarg = SearchArgument.FACTORY.newBuilder()
+    sarg = SearchArgumentFactory.newBuilder()
         .startNot()
            .startOr()
              .isNull("x")
@@ -2819,7 +2821,7 @@ public class TestSearchArgumentImpl {
   @Test
   public void testBuilderComplexTypes() throws Exception {
     SearchArgument sarg =
-        SearchArgument.FACTORY.newBuilder()
+        SearchArgumentFactory.newBuilder()
             .startAnd()
               .lessThan("x", new DateWritable(10))
               .lessThanEquals("y", new HiveChar("hi", 10))
@@ -2831,7 +2833,7 @@ public class TestSearchArgumentImpl {
         "leaf-2 = (EQUALS z 1.0)\n" +
         "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString());
 
-    sarg = SearchArgument.FACTORY.newBuilder()
+    sarg = SearchArgumentFactory.newBuilder()
         .startNot()
            .startOr()
              .isNull("x")
@@ -2851,7 +2853,7 @@ public class TestSearchArgumentImpl {
   @Test
   public void testBuilderComplexTypes2() throws Exception {
     SearchArgument sarg =
-        SearchArgument.FACTORY.newBuilder()
+        SearchArgumentFactory.newBuilder()
             .startAnd()
             .lessThan("x", new DateWritable(10))
             .lessThanEquals("y", new HiveChar("hi", 10))
@@ -2863,7 +2865,7 @@ public class TestSearchArgumentImpl {
         "leaf-2 = (EQUALS z 1.0)\n" +
         "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString());
 
-    sarg = SearchArgument.FACTORY.newBuilder()
+    sarg = SearchArgumentFactory.newBuilder()
         .startNot()
         .startOr()
         .isNull("x")
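
This file migrates both factory entry points: SearchArgumentFactory.create(...) for expressions deserialized from a plan, and SearchArgumentFactory.newBuilder() for hand-built predicates. A sketch of the builder path with the complex key types the tests above exercise (all names taken from this commit):

    import org.apache.hadoop.hive.common.type.HiveChar;
    import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
    import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
    import org.apache.hadoop.hive.serde2.io.DateWritable;

    // Builder keys may be dates, chars, etc., not just primitives.
    SearchArgument sarg = SearchArgumentFactory.newBuilder()
        .startAnd()
          .lessThan("x", new DateWritable(10))
          .lessThanEquals("y", new HiveChar("hi", 10))
        .end()
        .build();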

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java Mon Sep  8 04:38:17 2014
@@ -20,18 +20,29 @@ package org.apache.hadoop.hive.ql.sessio
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Method;
 import java.util.Arrays;
 import java.util.Collection;
 
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hive.common.util.HiveTestUtils;
+import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
+import com.google.common.io.Files;
+
 /**
  * Test SessionState
  */
@@ -39,6 +50,14 @@ import org.junit.runners.Parameterized.P
 public class TestSessionState {
 
   private final boolean prewarm;
+  private final static String clazzDistFileName = "SessionStateTest.jar.v1";
+  private final static String clazzV2FileName = "SessionStateTest.jar.v2";
+  private final static String reloadClazzFileName = "reloadingClazz.jar";
+  private final static String reloadClazzName = "org.apache.test.RefreshedJarClass";
+  private final static String versionMethodName = "version";
+  private static String hiveReloadPath;
+  private File reloadFolder;
+  public static final Log LOG = LogFactory.getLog(TestSessionState.class);
 
   public TestSessionState(Boolean mode) {
     this.prewarm = mode.booleanValue();
@@ -50,8 +69,20 @@ public class TestSessionState {
   }
 
   @Before
-  public void setup() {
+  public void setUp() {
     HiveConf conf = new HiveConf();
+    String tmp = System.getProperty("java.io.tmpdir");
+    File tmpDir = new File(tmp);
+    if (!tmpDir.exists()) {
+      tmpDir.mkdir();
+    }
+    hiveReloadPath = Files.createTempDir().getAbsolutePath();
+    // create the reloading folder to place jar files if not exist
+    reloadFolder = new File(hiveReloadPath);
+    if (!reloadFolder.exists()) {
+      reloadFolder.mkdir();
+    }
+
     if (prewarm) {
       HiveConf.setBoolVar(conf, ConfVars.HIVE_PREWARM_ENABLED, true);
       HiveConf.setIntVar(conf, ConfVars.HIVE_PREWARM_NUM_CONTAINERS, 1);
@@ -59,6 +90,11 @@ public class TestSessionState {
     SessionState.start(conf);
   }
 
+  @After
+  public void tearDown(){
+    FileUtils.deleteQuietly(reloadFolder);
+  }
+
   /**
    * test set and get db
    */
@@ -129,4 +165,81 @@ public class TestSessionState {
     assertEquals("Other thread loader and current thread loader",
         otherThread.loader, Thread.currentThread().getContextClassLoader());
   }
+
+  private String getReloadedClazzVersion(ClassLoader cl) throws Exception {
+    Class addedClazz = Class.forName(reloadClazzName, true, cl);
+    Method versionMethod = addedClazz.getMethod(versionMethodName);
+    return (String) versionMethod.invoke(addedClazz.newInstance());
+  }
+
+  @Test
+  public void testReloadAuxJars2() {
+    HiveConf conf = new HiveConf();
+    HiveConf.setVar(conf, ConfVars.HIVERELOADABLEJARS, hiveReloadPath);
+    SessionState ss = new SessionState(conf);
+    SessionState.start(ss);
+
+    ss = SessionState.get();
+    File dist = null;
+    try {
+      dist = new File(reloadFolder.getAbsolutePath() + File.separator + reloadClazzFileName);
+      Files.copy(new File(HiveTestUtils.getFileFromClasspath(clazzDistFileName)), dist);
+      ss.reloadAuxJars();
+      Assert.assertEquals("version1", getReloadedClazzVersion(ss.getConf().getClassLoader()));
+    } catch (Exception e) {
+      LOG.error("Reload auxiliary jar test fail with message: ", e);
+      Assert.fail(e.getMessage());
+    } finally {
+      FileUtils.deleteQuietly(dist);
+      try {
+        ss.close();
+      } catch (IOException ioException) {
+        Assert.fail(ioException.getMessage());
+        LOG.error("Fail to close the created session: ", ioException);
+      }
+    }
+  }
+
+  @Test
+  public void testReloadExistingAuxJars2() {
+    HiveConf conf = new HiveConf();
+    HiveConf.setVar(conf, ConfVars.HIVERELOADABLEJARS, hiveReloadPath);
+
+    SessionState ss = new SessionState(conf);
+    SessionState.start(ss);
+    File dist = null;
+
+    try {
+      ss = SessionState.get();
+
+      LOG.info("copy jar file 1");
+      dist = new File(reloadFolder.getAbsolutePath() + File.separator + reloadClazzFileName);
+
+      Files.copy(new File(HiveTestUtils.getFileFromClasspath(clazzDistFileName)), dist);
+      ss.reloadAuxJars();
+
+      Assert.assertEquals("version1", getReloadedClazzVersion(ss.getConf().getClassLoader()));
+
+      LOG.info("copy jar file 2");
+      FileUtils.deleteQuietly(dist);
+      Files.copy(new File(HiveTestUtils.getFileFromClasspath(clazzV2FileName)), dist);
+
+      ss.reloadAuxJars();
+      Assert.assertEquals("version2", getReloadedClazzVersion(ss.getConf().getClassLoader()));
+
+      FileUtils.deleteQuietly(dist);
+      ss.reloadAuxJars();
+    } catch (Exception e) {
+      LOG.error("refresh existing jar file case failed with message: ", e);
+      Assert.fail(e.getMessage());
+    } finally {
+      FileUtils.deleteQuietly(dist);
+      try {
+        ss.close();
+      } catch (IOException ioException) {
+        Assert.fail(ioException.getMessage());
+        LOG.error("Fail to close the created session: ", ioException);
+      }
+    }
+  }
 }
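
The two new tests drive session-level reloading of auxiliary jars. A sketch of the flow under test, where hiveReloadPath is any writable directory named by ConfVars.HIVERELOADABLEJARS; note that in the finally blocks above Assert.fail() throws before the LOG.error() that follows it can run, so a sketch should log first:

    import java.io.IOException;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    import org.apache.hadoop.hive.ql.session.SessionState;

    HiveConf conf = new HiveConf();
    HiveConf.setVar(conf, ConfVars.HIVERELOADABLEJARS, hiveReloadPath);
    SessionState ss = new SessionState(conf);
    SessionState.start(ss);
    try {
      // Copy a new or updated jar into hiveReloadPath, then refresh the
      // session classloader so the new classes become loadable.
      ss.reloadAuxJars();
    } catch (Exception e) {
      LOG.error("Reload failed: ", e); // log before any Assert.fail(): fail() throws immediately
    } finally {
      try {
        ss.close();
      } catch (IOException ioe) {
        LOG.error("Failed to close the created session: ", ioe);
      }
    }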

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java Mon Sep  8 04:38:17 2014
@@ -30,6 +30,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Tests for the compactor Initiator thread.
@@ -89,7 +90,7 @@ public class TestInitiator extends Compa
     txnHandler.findNextToCompact("nosuchhost-193892");
 
     HiveConf conf = new HiveConf();
-    HiveConf.setLongVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_TIMEOUT, 1L);
+    conf.setTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_TIMEOUT, 1L, TimeUnit.MILLISECONDS);
 
     startInitiator(conf);
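
The timeout configuration moves from a raw long to a unit-qualified setter: setTimeVar makes the unit explicit at the call site instead of assuming the variable's default. A sketch of the changed call:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf;

    HiveConf conf = new HiveConf();
    // One-millisecond compactor worker timeout, unit stated explicitly.
    conf.setTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_TIMEOUT,
        1L, TimeUnit.MILLISECONDS);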
 

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java Mon Sep  8 04:38:17 2014
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
-import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -280,7 +279,7 @@ public class TestWorker extends Compacto
     // There should still now be 5 directories in the location
     FileSystem fs = FileSystem.get(conf);
     FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
-for (int i = 0; i < stat.length; i++) System.out.println("HERE: " + stat[i].getPath().toString());
+    for (int i = 0; i < stat.length; i++) System.out.println("HERE: " + stat[i].getPath().toString());
     Assert.assertEquals(4, stat.length);
 
     // Find the new delta file and make sure it has the right contents
@@ -507,7 +506,7 @@ for (int i = 0; i < stat.length; i++) Sy
     Assert.assertEquals(1, compacts.size());
     Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
 
-    // There should still now be 5 directories in the location
+    // There should now be 3 directories in the location
     FileSystem fs = FileSystem.get(conf);
     FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
     Assert.assertEquals(3, stat.length);

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q Mon Sep  8 04:38:17 2014
@@ -67,3 +67,33 @@ explain select year from loc_orc group b
 -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
 explain select state,locid from loc_orc group by state,locid with cube;
 
+set hive.stats.fetch.column.stats=false;
+set hive.stats.map.parallelism=1;
+
+-- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+explain select state,locid from loc_orc group by state,locid with cube;
+
+-- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+explain select state,locid from loc_orc group by state,locid with rollup;
+
+-- map-side GBY numRows: 8 reduce-side GBY numRows: 4
+explain select state,locid from loc_orc group by state,locid grouping sets((state));
+
+-- map-side GBY numRows: 16 reduce-side GBY numRows: 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+
+-- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+
+-- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+
+set hive.stats.map.parallelism=10;
+
+-- map-side GBY: numRows: 80 (map-side will not do any reduction)
+-- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
+explain select year from loc_orc group by year;
+
+-- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
+explain select state,locid from loc_orc group by state,locid with cube;
+

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q Mon Sep  8 04:38:17 2014
@@ -1,4 +1,4 @@
-set hive.stats.dbclass=counter;
+set hive.stats.dbclass=fs;
 set hive.stats.autogather=true;
 
 DROP TABLE lineitem;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/show_tables.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/show_tables.q?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/show_tables.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/show_tables.q Mon Sep  8 04:38:17 2014
@@ -27,6 +27,15 @@ SHOW TABLES IN default;
 SHOW TABLES IN test_db "test*";
 SHOW TABLES IN test_db LIKE "nomatch";
 
+-- SHOW TABLE EXTENDED basic syntax tests and wildcard
+SHOW TABLE EXTENDED IN test_db LIKE foo;
+SHOW TABLE EXTENDED IN test_db LIKE "foo";
+SHOW TABLE EXTENDED IN test_db LIKE 'foo';
+SHOW TABLE EXTENDED IN test_db LIKE `foo`;
+SHOW TABLE EXTENDED IN test_db LIKE 'ba*';
+SHOW TABLE EXTENDED IN test_db LIKE "ba*";
+SHOW TABLE EXTENDED IN test_db LIKE `ba*`;
+
 -- SHOW TABLES from a database with a name that requires escaping
 CREATE DATABASE `database`;
 USE `database`;

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out Mon Sep  8 04:38:17 2014
@@ -756,3 +756,445 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+explain select state,locid from loc_orc group by state,locid with rollup
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+explain select state,locid from loc_orc group by state,locid with rollup
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- map-side GBY numRows: 8 reduce-side GBY numRows: 4
+explain select state,locid from loc_orc group by state,locid grouping sets((state))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side GBY numRows: 8 reduce-side GBY numRows: 4
+explain select state,locid from loc_orc group by state,locid grouping sets((state))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- map-side GBY numRows: 16 reduce-side GBY numRows: 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side GBY numRows: 16 reduce-side GBY numRows: 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),())
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),())
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- map-side GBY: numRows: 80 (map-side will not do any reduction)
+-- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
+explain select year from loc_orc group by year
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side GBY: numRows: 80 (map-side will not do any reduction)
+-- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
+explain select year from loc_orc group by year
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: year (type: int)
+              outputColumnNames: year
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: year (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 80 Data size: 7960 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 80 Data size: 7960 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: int)
+            outputColumnNames: _col0
+            Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6 and 7 respectively. numRows = min(320/2, 6*7)
+explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6 and 7 respectively. numRows = min(320/2, 6*7)
+explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

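The test comments in the plans above encode the group-by cardinality estimate under test. A worked restatement of their arithmetic, quoting the min() rule from the comments themselves rather than deriving it from the optimizer source:

  reduce-side numRows = min(map-side numRows / 2, product of key NDVs)

  group by year:                  min(80/2, 2)    = min(40, 2)   = 2
  group by state,locid with cube: min(320/2, 6*7) = min(160, 42) = 42

The printed plans show the halved values instead (80 -> 40 and 320 -> 160), presumably because "Column stats: NONE" leaves the NDV bound unavailable and the estimate falls back to plain halving.
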
Modified: hive/branches/spark/ql/src/test/results/clientpositive/describe_table_json.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/describe_table_json.q.out?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/describe_table_json.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/describe_table_json.q.out Mon Sep  8 04:38:17 2014
@@ -20,7 +20,7 @@ PREHOOK: query: SHOW TABLE EXTENDED LIKE
 PREHOOK: type: SHOW_TABLESTATUS
 POSTHOOK: query: SHOW TABLE EXTENDED LIKE 'json*'
 POSTHOOK: type: SHOW_TABLESTATUS
-{"tables":[]}
+#### A masked pattern was here ####
 PREHOOK: query: ALTER TABLE jsontable SET TBLPROPERTIES ('id' = 'jsontable')
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@jsontable

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out Mon Sep  8 04:38:17 2014
@@ -44,12 +44,12 @@ STAGE PLANS:
                 keys: key (type: string), val (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -128,12 +128,12 @@ STAGE PLANS:
                 keys: key (type: string), '0' (type: string), val (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -200,12 +200,12 @@ STAGE PLANS:
                 keys: key (type: string), val (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -213,7 +213,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -229,7 +229,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -308,12 +308,12 @@ STAGE PLANS:
                 keys: key (type: string), '0' (type: string), val (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -405,12 +405,12 @@ STAGE PLANS:
                 keys: key (type: string), val (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col3 (type: bigint)
             Select Operator
               expressions: key (type: string), val (type: string)
@@ -421,7 +421,7 @@ STAGE PLANS:
                 keys: key (type: string), val (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
                 File Output Operator
                   compressed: false
                   table:
@@ -434,7 +434,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -450,7 +450,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -493,7 +493,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: rand() (type: double)
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -501,7 +501,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -517,7 +517,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator

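A pattern worth noting in the groupby_cube1.q.out hunks above, read purely off the new golden values rather than the estimator's source: CUBE over k keys expands to 2^k grouping sets, and the updated map-side Data size scales to match, 30 -> 120 (a factor of 4 for k = 2) in the count(*) plans. The count(DISTINCT) variants scale only by 2 (30 -> 60), presumably reflecting the different key layout of the distinct rewrite.
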
Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out Mon Sep  8 04:38:17 2014
@@ -56,7 +56,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -72,7 +72,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-              Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -162,7 +162,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -178,7 +178,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-              Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: double)
       Reduce Operator Tree:
         Group By Operator
@@ -290,7 +290,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 24 Data size: 168 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -306,7 +306,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-              Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 24 Data size: 168 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -314,14 +314,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: final
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 12 Data size: 84 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 12 Data size: 84 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 12 Data size: 84 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out Mon Sep  8 04:38:17 2014
@@ -62,12 +62,12 @@ STAGE PLANS:
                 keys: a (type: string), b (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                  Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col3 (type: struct<count:bigint,sum:double,input:string>), _col4 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -162,7 +162,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -178,7 +178,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-              Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: struct<count:bigint,sum:double,input:string>), _col4 (type: bigint)
       Reduce Operator Tree:
         Group By Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_rollup1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_rollup1.q.out?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_rollup1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_rollup1.q.out Mon Sep  8 04:38:17 2014
@@ -44,12 +44,12 @@ STAGE PLANS:
                 keys: key (type: string), val (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -122,12 +122,12 @@ STAGE PLANS:
                 keys: key (type: string), '0' (type: string), val (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -194,12 +194,12 @@ STAGE PLANS:
                 keys: key (type: string), val (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -207,7 +207,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -223,7 +223,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -296,12 +296,12 @@ STAGE PLANS:
                 keys: key (type: string), '0' (type: string), val (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -393,12 +393,12 @@ STAGE PLANS:
                 keys: key (type: string), val (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col3 (type: bigint)
             Select Operator
               expressions: key (type: string), val (type: string)
@@ -409,7 +409,7 @@ STAGE PLANS:
                 keys: key (type: string), val (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                 File Output Operator
                   compressed: false
                   table:
@@ -422,7 +422,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -438,7 +438,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -481,7 +481,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: rand() (type: double)
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -489,7 +489,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -505,7 +505,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
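
The rollup hunks complete the pattern: ROLLUP over k keys yields k+1 grouping sets, and the groupby_rollup1.q.out figures scale by 3 for k = 2 (30 -> 90), against the factor of 4 seen in the cube and grouping-set files (30 -> 120, 36 -> 144, 72 -> 288). Again, this is an observation about the regenerated golden files, not a claim about the estimator's internals.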