Posted to commits@hive.apache.org by br...@apache.org on 2014/10/30 17:22:48 UTC

svn commit: r1635536 [16/28] - in /hive/branches/spark: ./ accumulo-handler/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/ accumulo-handler/src/test/org/apache/hado...

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java Thu Oct 30 16:22:33 2014
@@ -48,7 +48,7 @@ public class TestWorker extends Compacto
   public void nothing() throws Exception {
     // Test that the whole thing works when there's nothing in the queue.  This is just a
     // survival test.
-    startWorker(new HiveConf());
+    startWorker();
   }
 
   @Test
@@ -205,19 +205,17 @@ public class TestWorker extends Compacto
 
     Table t = newTable("default", "st", false, new HashMap<String, String>(), sortCols);
 
-    HiveConf conf = new HiveConf();
-
-    addBaseFile(conf, t, null, 20L, 20);
-    addDeltaFile(conf, t, null, 21L, 22L, 2);
-    addDeltaFile(conf, t, null, 23L, 24L, 2);
-    addDeltaFile(conf, t, null, 21L, 24L, 4);
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 24L, 2);
+    addDeltaFile(t, null, 21L, 24L, 4);
 
     burnThroughTransactions(25);
 
     CompactionRequest rqst = new CompactionRequest("default", "st", CompactionType.MINOR);
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     // There should still be four directories in the location.
     FileSystem fs = FileSystem.get(conf);
@@ -232,12 +230,11 @@ public class TestWorker extends Compacto
 
     Table t = newTable("default", "sp", true, new HashMap<String, String>(), sortCols);
     Partition p = newPartition(t, "today", sortCols);
-    HiveConf conf = new HiveConf();
 
-    addBaseFile(conf, t, p, 20L, 20);
-    addDeltaFile(conf, t, p, 21L, 22L, 2);
-    addDeltaFile(conf, t, p, 23L, 24L, 2);
-    addDeltaFile(conf, t, p, 21L, 24L, 4);
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
+    addDeltaFile(t, p, 21L, 24L, 4);
 
     burnThroughTransactions(25);
 
@@ -245,7 +242,7 @@ public class TestWorker extends Compacto
     rqst.setPartitionname("ds=today");
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     // There should still be four directories in the location.
     FileSystem fs = FileSystem.get(conf);
@@ -258,18 +255,16 @@ public class TestWorker extends Compacto
     LOG.debug("Starting minorTableWithBase");
     Table t = newTable("default", "mtwb", false);
 
-    HiveConf conf = new HiveConf();
-
-    addBaseFile(conf, t, null, 20L, 20);
-    addDeltaFile(conf, t, null, 21L, 22L, 2);
-    addDeltaFile(conf, t, null, 23L, 24L, 2);
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 24L, 2);
 
     burnThroughTransactions(25);
 
     CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR);
     txnHandler.compact(rqst);
 
-    startWorker(conf);
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -304,11 +299,10 @@ public class TestWorker extends Compacto
   public void minorPartitionWithBase() throws Exception {
     Table t = newTable("default", "mpwb", true);
     Partition p = newPartition(t, "today");
-    HiveConf conf = new HiveConf();
 
-    addBaseFile(conf, t, p, 20L, 20);
-    addDeltaFile(conf, t, p, 21L, 22L, 2);
-    addDeltaFile(conf, t, p, 23L, 24L, 2);
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
 
     burnThroughTransactions(25);
 
@@ -316,7 +310,7 @@ public class TestWorker extends Compacto
     rqst.setPartitionname("ds=today");
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -351,17 +345,15 @@ public class TestWorker extends Compacto
     LOG.debug("Starting minorTableWithBase");
     Table t = newTable("default", "mtnb", false);
 
-    HiveConf conf = new HiveConf();
-
-    addDeltaFile(conf, t, null, 1L, 2L, 2);
-    addDeltaFile(conf, t, null, 3L, 4L, 2);
+    addDeltaFile(t, null, 1L, 2L, 2);
+    addDeltaFile(t, null, 3L, 4L, 2);
 
     burnThroughTransactions(5);
 
     CompactionRequest rqst = new CompactionRequest("default", "mtnb", CompactionType.MINOR);
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -396,18 +388,16 @@ public class TestWorker extends Compacto
     LOG.debug("Starting majorTableWithBase");
     Table t = newTable("default", "matwb", false);
 
-    HiveConf conf = new HiveConf();
-
-    addBaseFile(conf, t, null, 20L, 20);
-    addDeltaFile(conf, t, null, 21L, 22L, 2);
-    addDeltaFile(conf, t, null, 23L, 24L, 2);
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 24L, 2);
 
     burnThroughTransactions(25);
 
     CompactionRequest rqst = new CompactionRequest("default", "matwb", CompactionType.MAJOR);
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -428,8 +418,8 @@ public class TestWorker extends Compacto
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
-        Assert.assertEquals(1248L, buckets[0].getLen());
-        Assert.assertEquals(1248L, buckets[1].getLen());
+        Assert.assertEquals(624L, buckets[0].getLen());
+        Assert.assertEquals(624L, buckets[1].getLen());
       } else {
         LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
       }
@@ -442,11 +432,10 @@ public class TestWorker extends Compacto
     LOG.debug("Starting majorPartitionWithBase");
     Table t = newTable("default", "mapwb", true);
     Partition p = newPartition(t, "today");
-    HiveConf conf = new HiveConf();
 
-    addBaseFile(conf, t, p, 20L, 20);
-    addDeltaFile(conf, t, p, 21L, 22L, 2);
-    addDeltaFile(conf, t, p, 23L, 24L, 2);
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
 
     burnThroughTransactions(25);
 
@@ -454,7 +443,7 @@ public class TestWorker extends Compacto
     rqst.setPartitionname("ds=today");
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -475,8 +464,8 @@ public class TestWorker extends Compacto
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
-        Assert.assertEquals(1248L, buckets[0].getLen());
-        Assert.assertEquals(1248L, buckets[1].getLen());
+        Assert.assertEquals(624L, buckets[0].getLen());
+        Assert.assertEquals(624L, buckets[1].getLen());
       } else {
         LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
       }
@@ -489,17 +478,15 @@ public class TestWorker extends Compacto
     LOG.debug("Starting majorTableNoBase");
     Table t = newTable("default", "matnb", false);
 
-    HiveConf conf = new HiveConf();
-
-    addDeltaFile(conf, t, null, 1L, 2L, 2);
-    addDeltaFile(conf, t, null, 3L, 4L, 2);
+    addDeltaFile(t, null, 1L, 2L, 2);
+    addDeltaFile(t, null, 3L, 4L, 2);
 
     burnThroughTransactions(5);
 
     CompactionRequest rqst = new CompactionRequest("default", "matnb", CompactionType.MAJOR);
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -520,8 +507,8 @@ public class TestWorker extends Compacto
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
-        Assert.assertEquals(208L, buckets[0].getLen());
-        Assert.assertEquals(208L, buckets[1].getLen());
+        Assert.assertEquals(104L, buckets[0].getLen());
+        Assert.assertEquals(104L, buckets[1].getLen());
       } else {
         LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
       }
@@ -534,18 +521,16 @@ public class TestWorker extends Compacto
     LOG.debug("Starting majorTableLegacy");
     Table t = newTable("default", "matl", false);
 
-    HiveConf conf = new HiveConf();
-
-    addLegacyFile(conf, t, null, 20);
-    addDeltaFile(conf, t, null, 21L, 22L, 2);
-    addDeltaFile(conf, t, null, 23L, 24L, 2);
+    addLegacyFile(t, null, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 24L, 2);
 
     burnThroughTransactions(25);
 
     CompactionRequest rqst = new CompactionRequest("default", "matl", CompactionType.MAJOR);
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -566,8 +551,8 @@ public class TestWorker extends Compacto
         Assert.assertEquals(2, buckets.length);
         Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
         Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
-        Assert.assertEquals(1248L, buckets[0].getLen());
-        Assert.assertEquals(1248L, buckets[1].getLen());
+        Assert.assertEquals(624L, buckets[0].getLen());
+        Assert.assertEquals(624L, buckets[1].getLen());
       } else {
         LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
       }
@@ -580,18 +565,16 @@ public class TestWorker extends Compacto
     LOG.debug("Starting minorTableLegacy");
     Table t = newTable("default", "mtl", false);
 
-    HiveConf conf = new HiveConf();
-
-    addLegacyFile(conf, t, null, 20);
-    addDeltaFile(conf, t, null, 21L, 22L, 2);
-    addDeltaFile(conf, t, null, 23L, 24L, 2);
+    addLegacyFile(t, null, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 24L, 2);
 
     burnThroughTransactions(25);
 
     CompactionRequest rqst = new CompactionRequest("default", "mtl", CompactionType.MINOR);
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -622,11 +605,11 @@ public class TestWorker extends Compacto
   public void majorPartitionWithBaseMissingBuckets() throws Exception {
     Table t = newTable("default", "mapwbmb", true);
     Partition p = newPartition(t, "today");
-    HiveConf conf = new HiveConf();
 
-    addBaseFile(conf, t, p, 20L, 20, 2, false);
-    addDeltaFile(conf, t, p, 21L, 22L, 2, 2, false);
-    addDeltaFile(conf, t, p, 23L, 24L, 2);
+
+    addBaseFile(t, p, 20L, 20, 2, false);
+    addDeltaFile(t, p, 21L, 22L, 2, 2, false);
+    addDeltaFile(t, p, 23L, 26L, 4);
 
     burnThroughTransactions(25);
 
@@ -634,7 +617,7 @@ public class TestWorker extends Compacto
     rqst.setPartitionname("ds=today");
     txnHandler.compact(rqst);
 
-    startWorker(new HiveConf());
+    startWorker();
 
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
@@ -649,7 +632,7 @@ public class TestWorker extends Compacto
     // Find the new delta file and make sure it has the right contents
     boolean sawNewBase = false;
     for (int i = 0; i < stat.length; i++) {
-      if (stat[i].getPath().getName().equals("base_0000024")) {
+      if (stat[i].getPath().getName().equals("base_0000026")) {
         sawNewBase = true;
         FileStatus[] buckets = fs.listStatus(stat[i].getPath());
         Assert.assertEquals(2, buckets.length);
@@ -658,10 +641,12 @@ public class TestWorker extends Compacto
         // Bucket 0 should be small and bucket 1 should be large, make sure that's the case
         Assert.assertTrue(
             ("bucket_00000".equals(buckets[0].getPath().getName()) && 104L == buckets[0].getLen()
-            && "bucket_00001".equals(buckets[1].getPath().getName()) && 1248L == buckets[1] .getLen())
+            && "bucket_00001".equals(buckets[1].getPath().getName()) && 676L == buckets[1]
+                .getLen())
             ||
             ("bucket_00000".equals(buckets[1].getPath().getName()) && 104L == buckets[1].getLen()
-            && "bucket_00001".equals(buckets[0].getPath().getName()) && 1248L == buckets[0] .getLen())
+            && "bucket_00001".equals(buckets[0].getPath().getName()) && 676L == buckets[0]
+                .getLen())
         );
       } else {
         LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
@@ -669,9 +654,4 @@ public class TestWorker extends Compacto
     }
     Assert.assertTrue(sawNewBase);
   }
-
-  @Before
-  public void setUpTxnDb() throws Exception {
-    TxnDbUtil.setConfValues(new HiveConf());
-  }
 }
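
[Annotation] The TestWorker hunks above all make the same move: every per-test "new HiveConf()" disappears, startWorker() loses its argument, the file helpers (addBaseFile, addDeltaFile, addLegacyFile) drop their conf parameter, and the per-test @Before that called TxnDbUtil.setConfValues goes away, while unchanged context such as FileSystem.get(conf) shows the tests now reading a field supplied by the CompactorTest base class. A minimal sketch of the pattern this implies follows; everything not visible in the diff (the setup method name, the worker wiring) is an assumption, not the actual CompactorTest code.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.junit.Before;

// Hypothetical reconstruction of the base-class side of this change; the real
// code lives in CompactorTest, which this diff does not show.
public abstract class CompactorTestSketch {
  protected HiveConf conf;  // shared field the tests reach via FileSystem.get(conf)

  @Before
  public void setupSketch() throws Exception {
    conf = new HiveConf();
    TxnDbUtil.setConfValues(conf);  // replaces the per-test @Before removed from TestWorker
  }

  // Zero-argument form the tests now call; presumably runs a compactor Worker
  // against the shared conf instead of a fresh HiveConf per invocation.
  protected void startWorker() throws Exception {
  }

  // Also visible in the diff: after a major compaction the new base directory
  // is named for the highest transaction id it covers, zero-padded to seven
  // digits, which is why widening the last delta from 23-24 to 23-26 moves the
  // expected directory from base_0000024 to base_0000026.
  protected static String baseDirName(long highestTxnId) {
    return String.format("base_%07d", highestTxnId);  // baseDirName(26L) -> "base_0000026"
  }
}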

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/authorization_disallow_transform.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/authorization_disallow_transform.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/authorization_disallow_transform.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/authorization_disallow_transform.q Thu Oct 30 16:22:33 2014
@@ -1,4 +1,6 @@
 set hive.test.authz.sstd.hs2.mode=true;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authorization.enabled=true;
 set role ALL;
-SELECT TRANSFORM (*) USING 'cat' AS (key, value) FROM src;
+create table t1(i int);
+SELECT TRANSFORM (*) USING 'cat' AS (key, value) FROM t1;
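
[Annotation] The two added lines work together: the SQL-standard test authorizer only vetoes queries once hive.security.authorization.enabled=true, and the target table is switched from src to a table the test user creates itself. The corresponding .q.out further down shows the veto surfacing as a HiveAccessControlException with the message reproduced below. A schematic of that gate, with every name invented for illustration; Hive's real plugin interfaces are not shown in this diff.

// All names below are hypothetical; only the exception message is taken from
// the .q.out in this commit.
class TransformGateSketch {
  private final boolean authorizationEnabled;

  TransformGateSketch(boolean authorizationEnabled) {
    this.authorizationEnabled = authorizationEnabled;
  }

  void checkQuery(boolean usesTransform) {
    // Without 'set hive.security.authorization.enabled=true' the gate never
    // fires, which is why the negative test needed the extra set statement.
    if (authorizationEnabled && usesTransform) {
      throw new IllegalStateException(
          "Query with transform clause is disallowed in current configuration.");
    }
  }
}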

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/set_hiveconf_validation2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/set_hiveconf_validation2.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/set_hiveconf_validation2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/set_hiveconf_validation2.q Thu Oct 30 16:22:33 2014
@@ -1,4 +1,4 @@
--- should fail: hive.fetch.task.conversion accepts minimal or more
+-- should fail: hive.fetch.task.conversion accepts none, minimal or more
 desc src;
 
 set hive.conf.validation=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q Thu Oct 30 16:22:33 2014
@@ -12,3 +12,5 @@ set hive.vectorized.execution.enabled=tr
 update acid_vectorized set b = 'foo' where b = 'bar';
 set hive.vectorized.execution.enabled=true;
 delete from acid_vectorized where b = 'foo';
+set hive.vectorized.execution.enabled=true;
+select a, b from acid_vectorized order by a, b;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/alter_partition_change_col.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/alter_partition_change_col.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/alter_partition_change_col.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/alter_partition_change_col.q Thu Oct 30 16:22:33 2014
@@ -6,54 +6,70 @@ SET hive.exec.dynamic.partition.mode = n
 create table alter_partition_change_col0 (c1 string, c2 string);
 load data local inpath '../../data/files/dec.txt' overwrite into table alter_partition_change_col0;
 
-create table alter_partition_change_col1 (c1 string, c2 string) partitioned by (p1 string);
+create table alter_partition_change_col1 (c1 string, c2 string) partitioned by (p1 string, p2 string);
 
-insert overwrite table alter_partition_change_col1 partition (p1)
-  select c1, c2, 'abc' from alter_partition_change_col0
+insert overwrite table alter_partition_change_col1 partition (p1, p2)
+  select c1, c2, 'abc', '123' from alter_partition_change_col0
   union all
-  select c1, c2, null from alter_partition_change_col0;
+  select c1, c2, null, '123' from alter_partition_change_col0;
   
 show partitions alter_partition_change_col1;
-select * from alter_partition_change_col1;
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
 
 -- Change c2 to decimal(10,0)
 alter table alter_partition_change_col1 change c2 c2 decimal(10,0);
-alter table alter_partition_change_col1 partition (p1='abc') change c2 c2 decimal(10,0);
-alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') change c2 c2 decimal(10,0);
-select * from alter_partition_change_col1;
+alter table alter_partition_change_col1 partition (p1='abc', p2='123') change c2 c2 decimal(10,0);
+alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') change c2 c2 decimal(10,0);
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
 
 -- Change the column type at the table level. Table-level describe shows the new type, but the existing partition does not.
 alter table alter_partition_change_col1 change c2 c2 decimal(14,4);
 describe alter_partition_change_col1;
-describe alter_partition_change_col1 partition (p1='abc');
-select * from alter_partition_change_col1;
+describe alter_partition_change_col1 partition (p1='abc', p2='123');
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
 
 -- now change the column type of the existing partition
-alter table alter_partition_change_col1 partition (p1='abc') change c2 c2 decimal(14,4);
-describe alter_partition_change_col1 partition (p1='abc');
-select * from alter_partition_change_col1;
+alter table alter_partition_change_col1 partition (p1='abc', p2='123') change c2 c2 decimal(14,4);
+describe alter_partition_change_col1 partition (p1='abc', p2='123');
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
 
 -- change column for default partition value
-alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') change c2 c2 decimal(14,4);
-describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__');
-select * from alter_partition_change_col1;
+alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') change c2 c2 decimal(14,4);
+describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
 
 -- Try out replace columns
-alter table alter_partition_change_col1 partition (p1='abc') replace columns (c1 string);
+alter table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string);
 describe alter_partition_change_col1;
-describe alter_partition_change_col1 partition (p1='abc');
-select * from alter_partition_change_col1;
+describe alter_partition_change_col1 partition (p1='abc', p2='123');
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
+
 alter table alter_partition_change_col1 replace columns (c1 string);
 describe alter_partition_change_col1;
-select * from alter_partition_change_col1;
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
 
 -- Try add columns
 alter table alter_partition_change_col1 add columns (c2 decimal(14,4));
 describe alter_partition_change_col1;
-describe alter_partition_change_col1 partition (p1='abc');
-select * from alter_partition_change_col1;
-
-alter table alter_partition_change_col1 partition (p1='abc') add columns (c2 decimal(14,4));
-describe alter_partition_change_col1 partition (p1='abc');
-select * from alter_partition_change_col1;
-
+describe alter_partition_change_col1 partition (p1='abc', p2='123');
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
+
+alter table alter_partition_change_col1 partition (p1='abc', p2='123') add columns (c2 decimal(14,4));
+describe alter_partition_change_col1 partition (p1='abc', p2='123');
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';
+
+-- Try changing column for all partitions at once
+alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0);
+describe alter_partition_change_col1 partition (p1='abc', p2='123');
+describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_partition_change_col1 where p1='abc';
+select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__';

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_role_grant2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_role_grant2.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_role_grant2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_role_grant2.q Thu Oct 30 16:22:33 2014
@@ -20,6 +20,7 @@ show principals src_role_wadmin;
 
 set user.name=user2;
 set role src_role_WadMin;
+show principals src_role_wadmin;
 -- grant role to another user
 grant src_Role_wadmin to user user3;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/cbo_correctness.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/cbo_correctness.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/cbo_correctness.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/cbo_correctness.q Thu Oct 30 16:22:33 2014
@@ -251,7 +251,7 @@ drop view v3;
 drop view v4;
 
 -- 11. Union All
-select * from t1 union all select * from t2 order by key;
+select * from t1 union all select * from t2 order by key, c_boolean, value, dt;
 select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key;
 select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join   (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key;
 
@@ -281,7 +281,7 @@ from src_cbo 
 where src_cbo.key not in  
   ( select key  from src_cbo s1 
     where s1.key > '2'
-  )
+  ) order by key
 ;
 
 -- non agg, corr
@@ -456,7 +456,33 @@ from (select b.key, count(*) 
 ) a
 ;
 
--- 17. get stats with empty partition list
+-- 20. Test get stats with empty partition list
 select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true;
 
+-- 21. Test groupby is empty and there are no other cols in aggr
+select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc;
+
+select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc;
+
+select unionsrc.key FROM (select 'max' as key, max(c_int) as value from t3 s1
+	UNION  ALL
+    	select 'min' as key,  min(c_int) as value from t3 s2
+    UNION ALL
+        select 'avg' as key,  avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key;
+        
+select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from t3 s1
+	UNION  ALL
+    	select 'min' as key,  min(c_int) as value from t3 s2
+    UNION ALL
+        select 'avg' as key,  avg(c_int) as value from t3 s3) unionsrc order by unionsrc.key;
+
+select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from t3 s1
+    UNION  ALL
+        select 'min' as key,  min(c_int) as value from t3 s2
+    UNION ALL
+        select 'avg' as key,  avg(c_int) as value from t3 s3) unionsrc group by unionsrc.key order by unionsrc.key;
+
+-- Windowing
+select *, rank() over(partition by key order by value) as rr from src1;
 
+select *, rank() over(partition by key order by value) from src1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/constprog2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/constprog2.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/constprog2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/constprog2.q Thu Oct 30 16:22:33 2014
@@ -7,4 +7,10 @@ SELECT src1.key, src1.key + 1, src2.valu
 
 SELECT src1.key, src1.key + 1, src2.value
        FROM src src1 join src src2 ON src1.key = src2.key AND src1.key = 86;
+EXPLAIN
+SELECT src1.key, src1.key + 1, src2.value
+       FROM src src1 join src src2 ON src1.key = src2.key AND cast(src1.key as double) = 86;
+
+SELECT src1.key, src1.key + 1, src2.value
+       FROM src src1 join src src2 ON src1.key = src2.key AND cast(src1.key as double) = 86;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/constprog_type.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/constprog_type.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/constprog_type.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/constprog_type.q Thu Oct 30 16:22:33 2014
@@ -12,3 +12,28 @@ SELECT cast('2013-11-17' as date), cast(
        FROM src tablesample (1 rows);
 
 SELECT * FROM dest1;
+
+SELECT key, value FROM src WHERE key = cast(86 as double);
+
+CREATE TABLE primitives1 (
+  id INT  ,
+  bool_col BOOLEAN  ,
+  tinyint_col TINYINT  ,
+  smallint_col SMALLINT  ,
+  int_col INT  ,
+  bigint_col BIGINT  ,
+  float_col FLOAT  ,
+  double_col DOUBLE  ,
+  date_string_col STRING  ,
+  string_col STRING  ,
+  timestamp_col TIMESTAMP  )
+ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY ','
+  ESCAPED BY '\\'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/types/primitives/090101.txt'
+OVERWRITE INTO TABLE primitives1 ;
+
+
+select id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col from primitives1 where id = cast (0 as float) and bool_col = cast('true' as boolean) and tinyint_col = cast(0 as double) and smallint_col = cast(0 as bigint) and int_col = cast (0 as double) and bigint_col = cast(0 as tinyint) and float_col = cast(0.0 as string) and  double_col = cast (0.0 as float);

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/create_like.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/create_like.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/create_like.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/create_like.q Thu Oct 30 16:22:33 2014
@@ -33,3 +33,32 @@ DROP TABLE table4;
 
 CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:hive.root}/data/files/ext_test';
 SELECT * FROM table4;
+
+CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{
+  "namespace": "testing.hive.avro.serde",
+  "name": "doctors",
+  "type": "record",
+  "fields": [
+    {
+      "name":"number",
+      "type":"int",
+      "doc":"Order of playing the role"
+    },
+    {
+      "name":"first_name",
+      "type":"string",
+      "doc":"first name of actor playing role"
+    },
+    {
+      "name":"last_name",
+      "type":"string",
+      "doc":"last name of actor playing role"
+    }
+  ]
+}');
+
+alter table doctors set tblproperties ('k1'='v1', 'k2'='v2');
+DESCRIBE FORMATTED doctors;
+
+CREATE TABLE doctors2 like doctors;
+DESCRIBE FORMATTED doctors2;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ctas_colname.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ctas_colname.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ctas_colname.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ctas_colname.q Thu Oct 30 16:22:33 2014
@@ -3,9 +3,11 @@
 -- HIVE-4392, column aliases from expressionRR (GBY, etc.) are not valid name for table
 
 -- group by
+
+
 explain
-create table summary as select *, sum(key), count(value) from src;
-create table summary as select *, sum(key), count(value) from src;
+create table summary as select *, key + 1, concat(value, value) from src limit 20;
+create table summary as select *, key + 1, concat(value, value) from src limit 20;
 describe formatted summary;
 select * from summary;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/decimal_serde.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/decimal_serde.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/decimal_serde.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/decimal_serde.q Thu Oct 30 16:22:33 2014
@@ -15,12 +15,15 @@ SELECT * FROM DECIMAL_TEXT ORDER BY key,
 CREATE TABLE DECIMAL_RC
 STORED AS RCFile AS
 SELECT * FROM DECIMAL_TEXT;
+describe formatted DECIMAL_RC;
 
 CREATE TABLE DECIMAL_LAZY_COL
 ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
 STORED AS RCFile AS
 SELECT * FROM DECIMAL_RC;
 
+describe formatted DECIMAL_LAZY_COL;
+
 CREATE TABLE DECIMAL_SEQUENCE
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\001'

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/drop_index.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/drop_index.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/drop_index.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/drop_index.q Thu Oct 30 16:22:33 2014
@@ -1,3 +1,2 @@
-SET hive.exec.drop.ignorenonexistent=false;
 DROP INDEX IF EXISTS UnknownIndex ON src;
 DROP INDEX IF EXISTS UnknownIndex ON UnknownTable;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/index_auto_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/index_auto_partitioned.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/index_auto_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/index_auto_partitioned.q Thu Oct 30 16:22:33 2014
@@ -1,4 +1,5 @@
 set hive.stats.dbclass=fs;
+set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 -- test automatic use of index on table with partitions

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q Thu Oct 30 16:22:33 2014
@@ -1,4 +1,5 @@
 set hive.stats.dbclass=fs;
+set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join_vc.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join_vc.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join_vc.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join_vc.q Thu Oct 30 16:22:33 2014
@@ -3,3 +3,10 @@
 explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3;
 
 select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3;
+
+explain
+select t2.BLOCK__OFFSET__INSIDE__FILE
+from src t1 join src t2 on t1.key = t2.key where t1.key < 100;
+
+select t2.BLOCK__OFFSET__INSIDE__FILE
+from src t1 join src t2 on t1.key = t2.key where t1.key < 100;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q Thu Oct 30 16:22:33 2014
@@ -23,3 +23,7 @@ select * from (select key from src where
 explain extended 
 select * from (select key from src union all select src.key from src left outer join srcpart on src.key = srcpart.key) a  where false;
 select * from (select key from src union all select src.key from src left outer join srcpart on src.key = srcpart.key) a  where false;
+
+explain extended 
+select * from src s1, src s2 where false and s1.value = s2.value;
+select * from src s1, src s2 where false and s1.value = s2.value;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q Thu Oct 30 16:22:33 2014
@@ -13,12 +13,14 @@ stored as ORC tblproperties("orc.row.ind
 
 -- insert creates separate orc files
 insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc;
+set hive.optimize.constant.propagation=false;
 insert into table vectororc select null, "b", rand(2), "zoo" from srcorc;
 insert into table vectororc select null, "c", rand(3), "zoo" from srcorc;
 insert into table vectororc select "apple", "d", rand(4), "zoo" from srcorc;
 insert into table vectororc select null, "e", rand(5), "z" from srcorc;
 insert into table vectororc select "apple", "f", rand(6), "z" from srcorc;
 insert into table vectororc select null, "g", rand(7), "zoo" from srcorc;
+set hive.optimize.constant.propagation=true;
 
 -- since vectororc table has multiple orc file we will load them into a single file using another table
 create table if not exists testorc

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/pcr.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/pcr.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/pcr.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/pcr.q Thu Oct 30 16:22:33 2014
@@ -138,4 +138,6 @@ insert overwrite table foo_field partiti
 select s,ds from foo_field where ((ds + s.a) > 0) order by ds,s;
 
 drop table foo_field;
-
+explain select key,value from srcpart where cast(hr as double)  = cast(11 as double);
+explain select key,value from srcpart where hr  = cast(11 as double);
+explain select key,value from srcpart where cast(hr as double)  = 11 ;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q Thu Oct 30 16:22:33 2014
@@ -32,5 +32,15 @@ from matchpath(on 
       arg2('LATE'), arg3(arr_delay > 15), 
     arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') 
    )
-where fl_num = 1142;       
+where fl_num = 1142;
+
+-- 3. empty partition.
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+        (select * from flights_tiny where fl_num = -1142) flights_tiny
+        sort by fl_num, year, month, day_of_month
+      arg1('LATE.LATE+'),
+      arg2('LATE'), arg3(arr_delay > 15),
+    arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+   );
    
\ No newline at end of file

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/show_functions.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/show_functions.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/show_functions.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/show_functions.q Thu Oct 30 16:22:33 2014
@@ -9,3 +9,23 @@ SHOW FUNCTIONS 'log.*';
 SHOW FUNCTIONS '.*date.*';
 
 SHOW FUNCTIONS '***';
+
+SHOW FUNCTIONS LIKE 'When';
+
+SHOW FUNCTIONS LIKE 'max|min';
+
+SHOW FUNCTIONS LIKE 'xpath*|m*';
+
+SHOW FUNCTIONS LIKE 'nomatch';
+
+SHOW FUNCTIONS LIKE "log";
+
+SHOW FUNCTIONS LIKE 'log';
+
+SHOW FUNCTIONS LIKE `log`;
+
+SHOW FUNCTIONS LIKE 'log*';
+
+SHOW FUNCTIONS LIKE "log*";
+
+SHOW FUNCTIONS LIKE `log*`;
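
[Annotation] The new SHOW FUNCTIONS LIKE cases exercise two pattern features, '*' as a wildcard and '|' between alternatives ('max|min', 'xpath*|m*'), across three quoting styles. A rough Java sketch of the matching behaviour those queries imply, assuming case-insensitive comparison against lowercase function names; this is an illustration of the pattern language, not Hive's actual implementation.

import java.util.regex.Pattern;

class ShowFunctionsLikeSketch {
  // Translate the simple LIKE pattern into a regex: escape everything except
  // the two supported metacharacters, then expand '*' to '.*'.
  static boolean matches(String likePattern, String functionName) {
    String regex = likePattern.toLowerCase()
        .replaceAll("([^\\w|*])", "\\\\$1")
        .replace("*", ".*");
    return Pattern.matches(regex, functionName.toLowerCase());
  }

  public static void main(String[] args) {
    System.out.println(matches("max|min", "min"));    // true
    System.out.println(matches("xpath*|m*", "max"));  // true
    System.out.println(matches("nomatch", "log"));    // false
  }
}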

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/tez_smb_1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/tez_smb_1.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/tez_smb_1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/tez_smb_1.q Thu Oct 30 16:22:33 2014
@@ -33,6 +33,3 @@ set hive.auto.convert.join.noconditional
 explain
 select count(*) from tab s1 join tab s3 on s1.key=s3.key;
 
-select s1.key, s1.value, s3.value from tab s1 join tab s3 on s1.key=s3.key;
-select count(*) from tab s2;
-

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vector_data_types.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vector_data_types.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vector_data_types.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vector_data_types.q Thu Oct 30 16:22:33 2014
@@ -40,8 +40,14 @@ EXPLAIN SELECT t, si, i, b, f, d, bo, s,
 
 SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY i LIMIT 20;
 
+SELECT SUM(HASH(*))
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY i) as q;
+
 SET hive.vectorized.execution.enabled=true;
 
 EXPLAIN select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY i LIMIT 20;
 
-SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY i LIMIT 20;
\ No newline at end of file
+SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY i LIMIT 20;
+
+SELECT SUM(HASH(*))
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY i) as q;
\ No newline at end of file
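
[Annotation] The added SUM(HASH(*)) queries compute an order-independent checksum over the whole result set, so the vectorized and non-vectorized runs can be compared without depending on row order. The same idea in plain Java, with Arrays.hashCode standing in for Hive's HASH(*) over a row.

import java.util.Arrays;
import java.util.List;

class RowChecksumSketch {
  // Sum a per-row hash; addition is commutative, so row order does not matter.
  static long checksum(List<Object[]> rows) {
    long sum = 0;
    for (Object[] row : rows) {
      sum += Arrays.hashCode(row);  // illustrative stand-in for HASH(*)
    }
    return sum;
  }
}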

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q Thu Oct 30 16:22:33 2014
@@ -444,3 +444,7 @@ select p_retailprice, avg(p_retailprice)
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) 
 from part 
 where p_mfgr='Manufacturer#1';
+
+-- 47. empty partition
+select sum(p_size) over (partition by p_mfgr )
+from part where p_mfgr = 'm1';

Modified: hive/branches/spark/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out Thu Oct 30 16:22:33 2014
@@ -76,5 +76,5 @@ PREHOOK: query: show indexes on src_rc_c
 PREHOOK: type: SHOWINDEXES
 POSTHOOK: query: show indexes on src_rc_concatenate_test
 POSTHOOK: type: SHOWINDEXES
-src_rc_concatenate_test_index	src_rc_concatenate_test	key                 	default__src_rc_concatenate_test_src_rc_concatenate_test_index__	compact             	
+src_rc_concatenate_test_index	src_rc_concatenate_test	key                 	default.default__src_rc_concatenate_test_src_rc_concatenate_test_index__	compact             	
 FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table default.src_rc_concatenate_test is indexed.

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ambiguous_col.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ambiguous_col.q.out?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ambiguous_col.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ambiguous_col.q.out Thu Oct 30 16:22:33 2014
@@ -1 +1 @@
-FAILED: SemanticException [Error 10007]: Ambiguous column reference key
+FAILED: SemanticException [Error 10007]: Ambiguous column reference key in a

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_disallow_transform.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_disallow_transform.q.out?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_disallow_transform.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_disallow_transform.q.out Thu Oct 30 16:22:33 2014
@@ -2,9 +2,17 @@ PREHOOK: query: set role ALL
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ALL
 POSTHOOK: type: SHOW_ROLES
-PREHOOK: query: SELECT TRANSFORM (*) USING 'cat' AS (key, value) FROM src
+PREHOOK: query: create table t1(i int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: create table t1(i int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: SELECT TRANSFORM (*) USING 'cat' AS (key, value) FROM t1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src
+PREHOOK: Input: default@t1
 #### A masked pattern was here ####
 FAILED: Hive Internal Error: org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException(Query with transform clause is disallowed in current configuration.)
 org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException: Query with transform clause is disallowed in current configuration.

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out Thu Oct 30 16:22:33 2014
@@ -1,4 +1,4 @@
 PREHOOK: query: -- This test will fail because hive_test_user is not in admin role
 show principals role1
 PREHOOK: type: SHOW_ROLE_PRINCIPALS
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current user : hive_test_user is not allowed get principals in a role. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current user : hive_test_user is not allowed get principals in a role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action.

Modified: hive/branches/spark/ql/src/test/results/clientnegative/set_hiveconf_validation2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/set_hiveconf_validation2.q.out?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/set_hiveconf_validation2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/set_hiveconf_validation2.q.out Thu Oct 30 16:22:33 2014
@@ -1,11 +1,11 @@
-PREHOOK: query: -- should fail: hive.fetch.task.conversion accepts minimal or more
+PREHOOK: query: -- should fail: hive.fetch.task.conversion accepts none, minimal or more
 desc src
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src
-POSTHOOK: query: -- should fail: hive.fetch.task.conversion accepts minimal or more
+POSTHOOK: query: -- should fail: hive.fetch.task.conversion accepts none, minimal or more
 desc src
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: default@src
 key                 	string              	default             
 value               	string              	default             
-Query returned non-zero code: 1, cause: 'SET hive.fetch.task.conversion=true' FAILED in validation : Invalid value.. expects one of [minimal, more].
+Query returned non-zero code: 1, cause: 'SET hive.fetch.task.conversion=true' FAILED in validation : Invalid value.. expects one of [none, minimal, more].
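
[Annotation] The extra value in both messages above comes from the validator attached to hive.fetch.task.conversion: the parameter is declared against a fixed set of strings, and adding "none" to that set is what turns "[minimal, more]" into "[none, minimal, more]". A self-contained sketch of that style of validation, with the failure wording copied from the output above; HiveConf's real validator classes are not part of this diff.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

class StringSetValidatorSketch {
  private final Set<String> expected =
      new LinkedHashSet<>(Arrays.asList("none", "minimal", "more"));

  // Returns null when the value is acceptable, otherwise the failure reason.
  String validate(String value) {
    if (value == null || !expected.contains(value.toLowerCase())) {
      return "Invalid value.. expects one of " + expected;  // prints [none, minimal, more]
    }
    return null;
  }
}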

Modified: hive/branches/spark/ql/src/test/results/clientpositive/acid_vectorization.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/acid_vectorization.q.out?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/acid_vectorization.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/acid_vectorization.q.out Thu Oct 30 16:22:33 2014
@@ -42,3 +42,21 @@ POSTHOOK: query: delete from acid_vector
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_vectorized
 POSTHOOK: Output: default@acid_vectorized
+PREHOOK: query: select a, b from acid_vectorized order by a, b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+POSTHOOK: query: select a, b from acid_vectorized order by a, b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+-1073279343	oj1YrV5Wa
+-1073051226	A34p7oRr2WvUJNf
+-1072910839	0iqrc5
+-1072081801	dPkN74F7
+-1072076362	2uLyD28144vklju213J1mr
+-1071480828	aw724t8c5558x2xneC624
+-1071363017	Anj0oF
+-1070883071	0ruyd6Y50JpdGRf6HqD
+-1070551679	iUR3Q
+-1069736047	k17Am8uPHWk02cEf1jet

Modified: hive/branches/spark/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out Thu Oct 30 16:22:33 2014
@@ -76,7 +76,7 @@ PREHOOK: query: show indexes on src_rc_c
 PREHOOK: type: SHOWINDEXES
 POSTHOOK: query: show indexes on src_rc_concatenate_test
 POSTHOOK: type: SHOWINDEXES
-src_rc_concatenate_test_index	src_rc_concatenate_test	key                 	default__src_rc_concatenate_test_src_rc_concatenate_test_index__	compact             	
+src_rc_concatenate_test_index	src_rc_concatenate_test	key                 	default.default__src_rc_concatenate_test_src_rc_concatenate_test_index__	compact             	
 PREHOOK: query: alter table src_rc_concatenate_test concatenate
 PREHOOK: type: ALTER_TABLE_MERGE
 PREHOOK: Input: default@src_rc_concatenate_test
@@ -215,7 +215,7 @@ PREHOOK: query: show indexes on src_rc_c
 PREHOOK: type: SHOWINDEXES
 POSTHOOK: query: show indexes on src_rc_concatenate_test_part
 POSTHOOK: type: SHOWINDEXES
-src_rc_concatenate_test_part_index	src_rc_concatenate_test_part	key                 	default__src_rc_concatenate_test_part_src_rc_concatenate_test_part_index__	compact             	
+src_rc_concatenate_test_part_index	src_rc_concatenate_test_part	key                 	default.default__src_rc_concatenate_test_part_src_rc_concatenate_test_part_index__	compact             	
 PREHOOK: query: alter table src_rc_concatenate_test_part partition (ds='2011') concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE
 PREHOOK: Input: default@src_rc_concatenate_test_part