Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 06:00:54 UTC

svn commit: r1629563 [15/33] - in /hive/branches/spark: ./ accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/common/type/ contr...

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_into2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_into2.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_into2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_into2.q Mon Oct  6 04:00:39 2014
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=true;
 DROP TABLE insert_into2;
 CREATE TABLE insert_into2 (key int, value string) 
   PARTITIONED BY (ds string);
@@ -5,7 +6,12 @@ CREATE TABLE insert_into2 (key int, valu
 EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1') 
   SELECT * FROM src LIMIT 100;
 INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100;
+explain
+select count (*) from insert_into2 where ds = '1';
+select count (*) from insert_into2 where ds = '1';
 INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100;
+explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1';
 SELECT COUNT(*) FROM insert_into2 WHERE ds='1';
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
@@ -19,6 +25,9 @@ INSERT OVERWRITE TABLE insert_into2 PART
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
 ) t;
+explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
 
 EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src LIMIT 50;
@@ -27,5 +36,11 @@ INSERT OVERWRITE TABLE insert_into2 PART
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
 ) t;
+explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
+
 
 DROP TABLE insert_into2;
+
+set hive.compute.query.using.stats=false;

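The hunk above turns on hive.compute.query.using.stats so the test can verify stats-backed answering of simple aggregates: with up-to-date basic statistics, Hive can serve COUNT(*) on a table or partition straight from the metastore instead of launching a job. A minimal sketch of that pattern (the table name t is illustrative, not from the patch):

  set hive.compute.query.using.stats=true;
  create table t (key int) partitioned by (ds string);
  insert into table t partition (ds='1') select key from src limit 10;
  -- With stats populated by the insert, this count can be answered from
  -- metadata; EXPLAIN should show no real table scan for it.
  explain select count(*) from t where ds = '1';
  select count(*) from t where ds = '1';
  set hive.compute.query.using.stats=false;
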
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_orig_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_orig_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_orig_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_orig_table.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_iot(
@@ -15,7 +14,7 @@ create table acid_iot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc;
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_iot;
 

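This hunk and the similar ones below share a single pattern: the tables used by the ACID tests now carry TBLPROPERTIES ('transactional'='true'), and the forced hive.input.format override is dropped. A transactional table in Hive must be bucketed, stored as ORC, and used with the DbTxnManager; a minimal sketch of the required combination (the table name acid_demo is illustrative):

  set hive.support.concurrency=true;
  set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
  set hive.enforce.bucketing=true;
  -- ORC storage + bucketing + the transactional property together make
  -- the table eligible for INSERT/UPDATE/DELETE under the DbTxnManager.
  create table acid_demo (a int, b varchar(128))
    clustered by (a) into 2 buckets
    stored as orc
    TBLPROPERTIES ('transactional'='true');
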
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_iud select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,12 +1,11 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table ivdp(i int,
                  de decimal(5,2),
-                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc;
+                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table ivdp partition (ds) values 
     (1, 109.23, 'and everywhere that mary went', 'today'),

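In the dynamic-partition form shown above, partition (ds) names no literal value, so the trailing expression of each VALUES tuple supplies the ds partition. A short sketch of the mapping, using the row from the hunk:

  set hive.exec.dynamic.partition.mode=nonstrict;
  -- Columns bind as (i, de, vc, ds); 'today' becomes the partition value.
  insert into table ivdp partition (ds) values
      (1, 109.23, 'and everywhere that mary went', 'today');
  select i, de, vc from ivdp where ds = 'today';
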
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivnp(ti tinyint,
@@ -15,7 +14,7 @@ create table acid_ivnp(ti tinyint,
                  b boolean,
                  s string,
                  vc varchar(128),
-                 ch char(12)) clustered by (i) into 2 buckets stored as orc;
+                 ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_ivnp values 
     (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_orig_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_orig_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_orig_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_orig_table.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivot(
@@ -15,7 +14,7 @@ create table acid_ivot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc;
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivp(ti tinyint,
@@ -14,7 +13,7 @@ create table acid_ivp(ti tinyint,
                  dt date,
                  s string,
                  vc varchar(128),
-                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc;
+                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_ivp partition (ds='today') values 
     (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_tmp_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_tmp_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_tmp_table.q Mon Oct  6 04:00:39 2014
@@ -1,12 +1,12 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc;
+create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_ivtt values 
     (1, 109.23, 'mary had a little lamb'),
-    (429496729, 0.14, 'its fleece was white as snow');
+    (429496729, 0.14, 'its fleece was white as snow'),
+    (-29496729, -0.14, 'negative values test');
 
 select i, de, vc from acid_ivtt order by i;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q Mon Oct  6 04:00:39 2014
@@ -5,6 +5,10 @@ select key from src where false;
 select key from src where false;
 
 explain extended
+select count(key) from srcpart where 1=2 group by key;
+select count(key) from srcpart where 1=2 group by key;
+
+explain extended
 select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key;
 select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key;
 

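The queries added to optimize_nullscan.q all carry predicates that are provably false (where false, 1=2), which the null-scan optimization should turn into metadata-only scans that read no rows. A small sketch of the check (exact plan text varies by version):

  -- The plan should show an empty/null scan of srcpart rather than a
  -- real read, since no row can satisfy 1=2; the query returns no rows,
  -- because GROUP BY key over empty input produces no groups.
  explain extended
  select count(key) from srcpart where 1=2 group by key;
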
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q Mon Oct  6 04:00:39 2014
@@ -10,9 +10,14 @@ CREATE TABLE parquet_types_staging (
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10)
+  cvarchar varchar(10),
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>
 ) ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|';
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':';
 
 CREATE TABLE parquet_types (
   cint int,
@@ -23,7 +28,10 @@ CREATE TABLE parquet_types (
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10)
+  cvarchar varchar(10),
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>
 ) STORED AS PARQUET;
 
 LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging;
@@ -32,6 +40,8 @@ INSERT OVERWRITE TABLE parquet_types SEL
 
 SELECT * FROM parquet_types;
 
+SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types;
+
 SELECT ctinyint,
   MAX(cint),
   MIN(csmallint),

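parquet_types.q now exercises complex columns (map, array, struct) pushed through a delimited staging table into Parquet, which is why the ROW FORMAT gains COLLECTION ITEMS and MAP KEYS delimiters. A hedged sketch of reading the new columns back (column names come from the hunk; the map key 'k1' is illustrative):

  -- m1 map<string,varchar(3)>, l1 array<int>, st1 struct<c1:int,c2:char(1)>
  SELECT m1['k1'], l1[0], st1.c1, st1.c2 FROM parquet_types LIMIT 5;
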
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q Mon Oct  6 04:00:39 2014
@@ -1,4 +1,4 @@
-
+-- SORT_BEFORE_DIFF
 
 create table partition_test_partitioned(key string, value string) partitioned by (dt string);
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/quote2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/quote2.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/quote2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/quote2.q Mon Oct  6 04:00:39 2014
@@ -10,6 +10,7 @@ SELECT
     'abc\\\\\'',  "abc\\\\\"",
     'abc\\\\\\',  "abc\\\\\\",
     'abc""""\\',  "abc''''\\",
+    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
     "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',   "tab\ttab"
 FROM src
@@ -24,6 +25,7 @@ SELECT
     'abc\\\\\'',  "abc\\\\\"",
     'abc\\\\\\',  "abc\\\\\\",
     'abc""""\\',  "abc''''\\",
+    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
     "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',   "tab\ttab"
 FROM src

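The literals added to quote2.q mix backslash escapes with the SQL LIKE wildcards % and _. As a rough illustration of why such strings matter (assuming the usual backslash-escape behavior of Hive LIKE patterns):

  -- In a LIKE pattern, \_ matches a literal underscore instead of any
  -- single character; the string literal needs '\\_' to produce that.
  SELECT 'mysql_test' LIKE 'mysql\\_test' FROM src LIMIT 1;
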
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q Mon Oct  6 04:00:39 2014
@@ -1,12 +1,11 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uami(i int,
                  de decimal(5,2),
-                 vc varchar(128)) clustered by (i) into 2 buckets stored as orc;
+                 vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uami values 
     (1, 109.23, 'mary had a little lamb'),

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_all_non_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_all_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_all_non_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uanp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
+create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uap partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10;
 insert into table acid_uap partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 10 order by cint limit 10;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_all_types.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_all_types.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_all_types.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_all_types.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uat(ti tinyint,
@@ -15,7 +14,7 @@ create table acid_uat(ti tinyint,
                  s string,
                  vc varchar(128),
                  ch char(36),
-                 b boolean) clustered by (i) into 2 buckets stored as orc;
+                 b boolean) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uat
     select ctinyint,
@@ -53,4 +52,11 @@ update acid_uat set
 
 select * from acid_uat order by i;
 
+update acid_uat set
+  ti = ti * 2,
+  si = cast(f as int),
+  d = floor(de)
+  where s = 'aw724t8c5558x2xneC624';
+
 
+select * from acid_uat order by i;

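The UPDATE appended to update_all_types.q shows SET expressions computed from other columns of the same row (ti * 2, cast(f as int), floor(de)). One constraint worth noting in a sketch (acid_uat as declared in the hunk; the rejected statement is illustrative):

  -- Allowed: non-bucketing columns updated from row expressions.
  update acid_uat set si = cast(f as int) where s = 'aw724t8c5558x2xneC624';
  -- Not allowed: i is the bucketing column and cannot be updated;
  -- Hive rejects such a statement at compile time.
  -- update acid_uat set i = i + 1;
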
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_orig_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_orig_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_orig_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_orig_table.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/update_orig_table;
@@ -18,7 +17,7 @@ create table acid_uot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/update_orig_table';
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/update_orig_table' TBLPROPERTIES ('transactional'='true');
 
 update acid_uot set cstring1 = 'fred' where cint < -1070551679;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_tmp_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_tmp_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_tmp_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_tmp_table.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc;
+create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_utt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_two_cols.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_two_cols.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_two_cols.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_two_cols.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc;
+create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_utc select cint, cast(cstring1 as varchar(128)), cfloat from alltypesorc where cint < 0 order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_where_no_match.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_where_no_match.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_where_no_match.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_where_no_match.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_wnm select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_where_non_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_where_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_where_non_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uwnp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
+create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uwp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10;
 insert into table acid_uwp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 100 order by cint limit 10;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vector_char_simple.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vector_char_simple.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vector_char_simple.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vector_char_simple.q Mon Oct  6 04:00:39 2014
@@ -41,3 +41,16 @@ order by key desc
 limit 5;
 
 drop table char_2;
+
+
+-- Implicit conversion.  Occurs on the reduce side under Tez.
+create table char_3 (
+  field char(12)
+) stored as orc;
+
+explain
+insert into table char_3 select cint from alltypesorc limit 10;
+
+insert into table char_3 select cint from alltypesorc limit 10;
+
+drop table char_3;

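The char_3 block above inserts an int column (cint) into a char(12) column, relying on an implicit conversion that, per the comment, happens on the reduce side under Tez. Roughly, the statement behaves like this explicit form (a sketch, not part of the patch):

  -- The planner inserts the equivalent of this cast automatically.
  insert into table char_3 select cast(cint as char(12)) from alltypesorc limit 10;
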
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vector_varchar_simple.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vector_varchar_simple.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vector_varchar_simple.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vector_varchar_simple.q Mon Oct  6 04:00:39 2014
@@ -1,12 +1,12 @@
 SET hive.vectorized.execution.enabled=true;
-drop table char_2;
+drop table varchar_2;
 
-create table char_2 (
+create table varchar_2 (
   key varchar(10),
   value varchar(20)
 ) stored as orc;
 
-insert overwrite table char_2 select * from src;
+insert overwrite table varchar_2 select * from src;
 
 select key, value
 from src
@@ -14,13 +14,13 @@ order by key asc
 limit 5;
 
 explain select key, value
-from char_2
+from varchar_2
 order by key asc
 limit 5;
 
 -- should match the query from src
 select key, value
-from char_2
+from varchar_2
 order by key asc
 limit 5;
 
@@ -30,14 +30,26 @@ order by key desc
 limit 5;
 
 explain select key, value
-from char_2
+from varchar_2
 order by key desc
 limit 5;
 
 -- should match the query from src
 select key, value
-from char_2
+from varchar_2
 order by key desc
 limit 5;
 
-drop table char_2;
+drop table varchar_2;
+
+-- Implicit conversion.  Occurs on the reduce side under Tez.
+create table varchar_3 (
+  field varchar(25)
+) stored as orc;
+
+explain
+insert into table varchar_3 select cint from alltypesorc limit 10;
+
+insert into table varchar_3 select cint from alltypesorc limit 10;
+
+drop table varchar_3;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_0.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_0.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_0.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_0.q Mon Oct  6 04:00:39 2014
@@ -1,4 +1,180 @@
 SET hive.vectorized.execution.enabled=true;
+
+-- Use ORDER BY clauses to generate 2 stages.
+EXPLAIN
+SELECT MIN(ctinyint) as c1,
+       MAX(ctinyint),
+       COUNT(ctinyint),
+       COUNT(*)
+FROM   alltypesorc
+ORDER BY c1;
+
+SELECT MIN(ctinyint) as c1,
+       MAX(ctinyint),
+       COUNT(ctinyint),
+       COUNT(*)
+FROM   alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(ctinyint) as c1
+FROM   alltypesorc
+ORDER BY c1;
+
+SELECT SUM(ctinyint) as c1
+FROM   alltypesorc
+ORDER BY c1;
+
+EXPLAIN 
+SELECT
+  avg(ctinyint) as c1,
+  variance(ctinyint),
+  var_pop(ctinyint),
+  var_samp(ctinyint),
+  std(ctinyint),
+  stddev(ctinyint),
+  stddev_pop(ctinyint),
+  stddev_samp(ctinyint)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(ctinyint) as c1,
+  variance(ctinyint),
+  var_pop(ctinyint),
+  var_samp(ctinyint),
+  std(ctinyint),
+  stddev(ctinyint),
+  stddev_pop(ctinyint),
+  stddev_samp(ctinyint)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT MIN(cbigint) as c1,
+       MAX(cbigint),
+       COUNT(cbigint),
+       COUNT(*)
+FROM   alltypesorc
+ORDER BY c1;
+
+SELECT MIN(cbigint) as c1,
+       MAX(cbigint),
+       COUNT(cbigint),
+       COUNT(*)
+FROM   alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(cbigint) as c1
+FROM   alltypesorc
+ORDER BY c1;
+
+SELECT SUM(cbigint) as c1
+FROM   alltypesorc
+ORDER BY c1;
+
+EXPLAIN 
+SELECT
+  avg(cbigint) as c1,
+  variance(cbigint),
+  var_pop(cbigint),
+  var_samp(cbigint),
+  std(cbigint),
+  stddev(cbigint),
+  stddev_pop(cbigint),
+  stddev_samp(cbigint)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(cbigint) as c1,
+  variance(cbigint),
+  var_pop(cbigint),
+  var_samp(cbigint),
+  std(cbigint),
+  stddev(cbigint),
+  stddev_pop(cbigint),
+  stddev_samp(cbigint)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT MIN(cfloat) as c1,
+       MAX(cfloat),
+       COUNT(cfloat),
+       COUNT(*)
+FROM   alltypesorc
+ORDER BY c1;
+
+SELECT MIN(cfloat) as c1,
+       MAX(cfloat),
+       COUNT(cfloat),
+       COUNT(*)
+FROM   alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(cfloat) as c1
+FROM   alltypesorc
+ORDER BY c1;
+
+SELECT SUM(cfloat) as c1
+FROM   alltypesorc
+ORDER BY c1;
+
+EXPLAIN 
+SELECT
+  avg(cfloat) as c1,
+  variance(cfloat),
+  var_pop(cfloat),
+  var_samp(cfloat),
+  std(cfloat),
+  stddev(cfloat),
+  stddev_pop(cfloat),
+  stddev_samp(cfloat)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(cfloat) as c1,
+  variance(cfloat),
+  var_pop(cfloat),
+  var_samp(cfloat),
+  std(cfloat),
+  stddev(cfloat),
+  stddev_pop(cfloat),
+  stddev_samp(cfloat)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT AVG(cbigint),
+       (-(AVG(cbigint))),
+       (-6432 + AVG(cbigint)),
+       STDDEV_POP(cbigint),
+       (-((-6432 + AVG(cbigint)))),
+       ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))),
+       VAR_SAMP(cbigint),
+       (-((-6432 + AVG(cbigint)))),
+       (-6432 + (-((-6432 + AVG(cbigint))))),
+       (-((-6432 + AVG(cbigint)))),
+       ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))),
+       COUNT(*),
+       SUM(cfloat),
+       (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)),
+       (-(VAR_SAMP(cbigint))),
+       ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))),
+       MIN(ctinyint),
+       (-(MIN(ctinyint)))
+FROM   alltypesorc
+WHERE  (((cstring2 LIKE '%b%')
+         OR ((79.553 != cint)
+             OR (cbigint < cdouble)))
+        OR ((ctinyint >= csmallint)
+            AND ((cboolean2 = 1)
+                 AND (3569 = ctinyint))));
+
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),

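The block of aggregate queries added above all end in ORDER BY so each plan gets a second, reduce-side stage, exercising vectorized execution in both stages. A quick sanity check is to compare results with the feature toggled (using the same setting the file already sets):

  set hive.vectorized.execution.enabled=false;
  SELECT SUM(ctinyint) as c1 FROM alltypesorc ORDER BY c1;
  set hive.vectorized.execution.enabled=true;
  SELECT SUM(ctinyint) as c1 FROM alltypesorc ORDER BY c1;
  -- Both runs should return the same sum.
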
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q Mon Oct  6 04:00:39 2014
@@ -122,4 +122,20 @@ SELECT 
 FROM date_udf_flight_orc LIMIT 10;
 
 -- Test extracting the date part of expression that includes time
-SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
\ No newline at end of file
+SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
+
+EXPLAIN SELECT
+  min(fl_date) AS c1,
+  max(fl_date),
+  count(fl_date),
+  count(*)
+FROM date_udf_flight_orc
+ORDER BY c1;
+
+SELECT
+  min(fl_date) AS c1,
+  max(fl_date),
+  count(fl_date),
+  count(*)
+FROM date_udf_flight_orc
+ORDER BY c1;
\ No newline at end of file

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,7 @@
-SET hive.vectorized.execution.enabled = true;
-
 -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end.
+-- Turning on vectorization has been temporarily moved after filling the test table
+-- due to bug HIVE-8197.
+
 
 CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC;
 
@@ -11,6 +12,8 @@ SELECT
 FROM alltypesorc
 LIMIT 40;
 
+SET hive.vectorized.execution.enabled = true;
+
 CREATE TABLE alltypesorc_wrong(stimestamp1 string) STORED AS ORC;
 
 INSERT INTO TABLE alltypesorc_wrong SELECT 'abcd' FROM alltypesorc LIMIT 1;
@@ -122,3 +125,48 @@ SELECT
   second(stimestamp1)
 FROM alltypesorc_wrong
 ORDER BY c1;
+
+EXPLAIN SELECT
+  min(ctimestamp1),
+  max(ctimestamp1),
+  count(ctimestamp1),
+  count(*)
+FROM alltypesorc_string;
+
+SELECT
+  min(ctimestamp1),
+  max(ctimestamp1),
+  count(ctimestamp1),
+  count(*)
+FROM alltypesorc_string;
+
+-- SUM of timestamps is not vectorized reduce-side because it produces a double instead of a long (HIVE-8211)...
+EXPLAIN SELECT
+  sum(ctimestamp1)
+FROM alltypesorc_string;
+
+SELECT
+ sum(ctimestamp1)
+FROM alltypesorc_string;
+
+EXPLAIN SELECT
+  avg(ctimestamp1),
+  variance(ctimestamp1),
+  var_pop(ctimestamp1),
+  var_samp(ctimestamp1),
+  std(ctimestamp1),
+  stddev(ctimestamp1),
+  stddev_pop(ctimestamp1),
+  stddev_samp(ctimestamp1)
+FROM alltypesorc_string;
+
+SELECT
+  avg(ctimestamp1),
+  variance(ctimestamp1),
+  var_pop(ctimestamp1),
+  var_samp(ctimestamp1),
+  std(ctimestamp1),
+  stddev(ctimestamp1),
+  stddev_pop(ctimestamp1),
+  stddev_samp(ctimestamp1)
+FROM alltypesorc_string;
\ No newline at end of file

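Per the comment in the hunk, SUM over a timestamp column is not vectorized on the reduce side because it produces a double rather than a long (HIVE-8211). If the double-valued result is what the query intends, writing the cast explicitly documents it (a sketch, not from the patch):

  -- Casting timestamp to double yields seconds since the epoch, making
  -- the double-typed SUM input explicit.
  SELECT sum(cast(ctimestamp1 as double)) FROM alltypesorc_string;
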
Modified: hive/branches/spark/ql/src/test/queries/positive/udf6.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/positive/udf6.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/positive/udf6.q (original)
+++ hive/branches/spark/ql/src/test/queries/positive/udf6.q Mon Oct  6 04:00:39 2014
@@ -1 +1 @@
-FROM src SELECT CONCAT('a', 'b'), IF(TRUE, 1 ,2)
+FROM src SELECT CONCAT('a', 'b'), IF(TRUE, 1 ,2) + key

Modified: hive/branches/spark/ql/src/test/resources/orc-file-dump-dictionary-threshold.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/resources/orc-file-dump-dictionary-threshold.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/resources/orc-file-dump-dictionary-threshold.out (original)
+++ hive/branches/spark/ql/src/test/resources/orc-file-dump-dictionary-threshold.out Mon Oct  6 04:00:39 2014
@@ -38,7 +38,7 @@ File Statistics:
   Column 3: count: 21000 min: Darkness,-230 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 sum: 6910238
 
 Stripes:
-  Stripe: offset: 3 data: 151109 rows: 5000 tail: 68 index: 704
+  Stripe: offset: 3 data: 151108 rows: 5000 tail: 68 index: 704
     Stream: column 0 section ROW_INDEX start: 3 length 15
     Stream: column 1 section ROW_INDEX start: 18 length 156
     Stream: column 2 section ROW_INDEX start: 174 length 172
@@ -46,7 +46,7 @@ Stripes:
     Stream: column 1 section DATA start: 707 length 20029
     Stream: column 2 section DATA start: 20736 length 40035
     Stream: column 3 section DATA start: 60771 length 86757
-    Stream: column 3 section LENGTH start: 147528 length 4288
+    Stream: column 3 section LENGTH start: 147528 length 4287
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -65,19 +65,19 @@ Stripes:
       Entry 4: count: 1000 min: -9216505819108477308 max: 9196474183833079923 positions: 20006,8686,416
     Row group index column 3:
       Entry 0: count: 1000 min: Darkness,-230 max: worst-54-290-346-648-908-996 positions: 0,0,0,0,0
-      Entry 1: count: 1000 min: Darkness,-230-368-488-586-862-930-1686 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966 positions: 2777,8442,0,696,18
-      Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660 positions: 13595,4780,0,1555,14
-      Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788 positions: 31432,228,0,2373,90
-      Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744 positions: 54111,5096,0,3355,108
-  Stripe: offset: 151884 data: 336358 rows: 5000 tail: 69 index: 941
-    Stream: column 0 section ROW_INDEX start: 151884 length 15
-    Stream: column 1 section ROW_INDEX start: 151899 length 150
-    Stream: column 2 section ROW_INDEX start: 152049 length 167
-    Stream: column 3 section ROW_INDEX start: 152216 length 609
-    Stream: column 1 section DATA start: 152825 length 20029
-    Stream: column 2 section DATA start: 172854 length 40035
-    Stream: column 3 section DATA start: 212889 length 270789
-    Stream: column 3 section LENGTH start: 483678 length 5505
+      Entry 1: count: 1000 min: Darkness,-230-368-488-586-862-930-1686 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966 positions: 2777,8442,0,695,18
+      Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660 positions: 13595,4780,0,1554,14
+      Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788 positions: 31432,228,0,2372,90
+      Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744 positions: 54111,5096,0,3354,108
+  Stripe: offset: 151883 data: 336358 rows: 5000 tail: 69 index: 941
+    Stream: column 0 section ROW_INDEX start: 151883 length 15
+    Stream: column 1 section ROW_INDEX start: 151898 length 150
+    Stream: column 2 section ROW_INDEX start: 152048 length 167
+    Stream: column 3 section ROW_INDEX start: 152215 length 609
+    Stream: column 1 section DATA start: 152824 length 20029
+    Stream: column 2 section DATA start: 172853 length 40035
+    Stream: column 3 section DATA start: 212888 length 270789
+    Stream: column 3 section LENGTH start: 483677 length 5505
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -100,15 +100,15 @@ Stripes:
       Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988 positions: 80822,9756,0,1945,222
       Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984 positions: 137149,4496,0,3268,48
       Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938 positions: 197972,6590,0,4064,342
-  Stripe: offset: 489252 data: 558031 rows: 5000 tail: 69 index: 1169
-    Stream: column 0 section ROW_INDEX start: 489252 length 15
-    Stream: column 1 section ROW_INDEX start: 489267 length 159
-    Stream: column 2 section ROW_INDEX start: 489426 length 169
-    Stream: column 3 section ROW_INDEX start: 489595 length 826
-    Stream: column 1 section DATA start: 490421 length 20029
-    Stream: column 2 section DATA start: 510450 length 40035
-    Stream: column 3 section DATA start: 550485 length 492258
-    Stream: column 3 section LENGTH start: 1042743 length 5709
+  Stripe: offset: 489251 data: 558031 rows: 5000 tail: 69 index: 1169
+    Stream: column 0 section ROW_INDEX start: 489251 length 15
+    Stream: column 1 section ROW_INDEX start: 489266 length 159
+    Stream: column 2 section ROW_INDEX start: 489425 length 169
+    Stream: column 3 section ROW_INDEX start: 489594 length 826
+    Stream: column 1 section DATA start: 490420 length 20029
+    Stream: column 2 section DATA start: 510449 length 40035
+    Stream: column 3 section DATA start: 550484 length 492258
+    Stream: column 3 section LENGTH start: 1042742 length 5709
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -131,15 +131,15 @@ Stripes:
       Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976 positions: 170641,3422,0,2077,162
       Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766 positions: 268420,9960,0,3369,16
       Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974 positions: 377916,1620,0,4041,470
-  Stripe: offset: 1048521 data: 792850 rows: 5000 tail: 69 index: 1351
-    Stream: column 0 section ROW_INDEX start: 1048521 length 15
-    Stream: column 1 section ROW_INDEX start: 1048536 length 149
-    Stream: column 2 section ROW_INDEX start: 1048685 length 170
-    Stream: column 3 section ROW_INDEX start: 1048855 length 1017
-    Stream: column 1 section DATA start: 1049872 length 20029
-    Stream: column 2 section DATA start: 1069901 length 40035
-    Stream: column 3 section DATA start: 1109936 length 727038
-    Stream: column 3 section LENGTH start: 1836974 length 5748
+  Stripe: offset: 1048520 data: 792850 rows: 5000 tail: 69 index: 1351
+    Stream: column 0 section ROW_INDEX start: 1048520 length 15
+    Stream: column 1 section ROW_INDEX start: 1048535 length 149
+    Stream: column 2 section ROW_INDEX start: 1048684 length 170
+    Stream: column 3 section ROW_INDEX start: 1048854 length 1017
+    Stream: column 1 section DATA start: 1049871 length 20029
+    Stream: column 2 section DATA start: 1069900 length 40035
+    Stream: column 3 section DATA start: 1109935 length 727038
+    Stream: column 3 section LENGTH start: 1836973 length 5748
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -162,15 +162,15 @@ Stripes:
       Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878 positions: 263111,206,0,1926,462
       Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788 positions: 407371,8480,0,3444,250
       Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904 positions: 562094,3058,0,4643,292
-  Stripe: offset: 1842791 data: 188033 rows: 1000 tail: 67 index: 832
-    Stream: column 0 section ROW_INDEX start: 1842791 length 10
-    Stream: column 1 section ROW_INDEX start: 1842801 length 36
-    Stream: column 2 section ROW_INDEX start: 1842837 length 39
-    Stream: column 3 section ROW_INDEX start: 1842876 length 747
-    Stream: column 1 section DATA start: 1843623 length 4007
-    Stream: column 2 section DATA start: 1847630 length 8007
-    Stream: column 3 section DATA start: 1855637 length 174759
-    Stream: column 3 section LENGTH start: 2030396 length 1260
+  Stripe: offset: 1842790 data: 188033 rows: 1000 tail: 67 index: 832
+    Stream: column 0 section ROW_INDEX start: 1842790 length 10
+    Stream: column 1 section ROW_INDEX start: 1842800 length 36
+    Stream: column 2 section ROW_INDEX start: 1842836 length 39
+    Stream: column 3 section ROW_INDEX start: 1842875 length 747
+    Stream: column 1 section DATA start: 1843622 length 4007
+    Stream: column 2 section DATA start: 1847629 length 8007
+    Stream: column 3 section DATA start: 1855636 length 174759
+    Stream: column 3 section LENGTH start: 2030395 length 1260
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -182,6 +182,6 @@ Stripes:
     Row group index column 3:
       Entry 0: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164-19348-19400-19546-19776-19896-20084 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 positions: 0,0,0,0,0
 
-File length: 2033559 bytes
+File length: 2033557 bytes
 Padding length: 0 bytes
 Padding ratio: 0%

Modified: hive/branches/spark/ql/src/test/resources/orc-file-dump.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/resources/orc-file-dump.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/resources/orc-file-dump.out (original)
+++ hive/branches/spark/ql/src/test/resources/orc-file-dump.out Mon Oct  6 04:00:39 2014
@@ -38,16 +38,16 @@ File Statistics:
   Column 3: count: 21000 min: Darkness, max: worst sum: 81761
 
 Stripes:
-  Stripe: offset: 3 data: 63766 rows: 5000 tail: 79 index: 428
+  Stripe: offset: 3 data: 63765 rows: 5000 tail: 79 index: 428
     Stream: column 0 section ROW_INDEX start: 3 length 15
     Stream: column 1 section ROW_INDEX start: 18 length 158
     Stream: column 2 section ROW_INDEX start: 176 length 171
     Stream: column 3 section ROW_INDEX start: 347 length 84
     Stream: column 1 section DATA start: 431 length 20029
     Stream: column 2 section DATA start: 20460 length 40035
-    Stream: column 3 section DATA start: 60495 length 3544
-    Stream: column 3 section LENGTH start: 64039 length 25
-    Stream: column 3 section DICTIONARY_DATA start: 64064 length 133
+    Stream: column 3 section DATA start: 60495 length 3543
+    Stream: column 3 section LENGTH start: 64038 length 25
+    Stream: column 3 section DICTIONARY_DATA start: 64063 length 133
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -68,18 +68,18 @@ Stripes:
       Entry 0: count: 1000 min: Darkness, max: worst positions: 0,0,0
       Entry 1: count: 1000 min: Darkness, max: worst positions: 0,659,149
       Entry 2: count: 1000 min: Darkness, max: worst positions: 0,1531,3
-      Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2282,32
-      Entry 4: count: 1000 min: Darkness, max: worst positions: 0,3034,45
-  Stripe: offset: 64276 data: 63755 rows: 5000 tail: 79 index: 421
-    Stream: column 0 section ROW_INDEX start: 64276 length 15
-    Stream: column 1 section ROW_INDEX start: 64291 length 157
-    Stream: column 2 section ROW_INDEX start: 64448 length 169
-    Stream: column 3 section ROW_INDEX start: 64617 length 80
-    Stream: column 1 section DATA start: 64697 length 20029
-    Stream: column 2 section DATA start: 84726 length 40035
-    Stream: column 3 section DATA start: 124761 length 3533
-    Stream: column 3 section LENGTH start: 128294 length 25
-    Stream: column 3 section DICTIONARY_DATA start: 128319 length 133
+      Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2281,32
+      Entry 4: count: 1000 min: Darkness, max: worst positions: 0,3033,45
+  Stripe: offset: 64275 data: 63754 rows: 5000 tail: 79 index: 421
+    Stream: column 0 section ROW_INDEX start: 64275 length 15
+    Stream: column 1 section ROW_INDEX start: 64290 length 157
+    Stream: column 2 section ROW_INDEX start: 64447 length 169
+    Stream: column 3 section ROW_INDEX start: 64616 length 80
+    Stream: column 1 section DATA start: 64696 length 20029
+    Stream: column 2 section DATA start: 84725 length 40035
+    Stream: column 3 section DATA start: 124760 length 3532
+    Stream: column 3 section LENGTH start: 128292 length 25
+    Stream: column 3 section DICTIONARY_DATA start: 128317 length 133
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -101,17 +101,17 @@ Stripes:
       Entry 1: count: 1000 min: Darkness, max: worst positions: 0,761,12
       Entry 2: count: 1000 min: Darkness, max: worst positions: 0,1472,70
       Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2250,43
-      Entry 4: count: 1000 min: Darkness, max: worst positions: 0,2979,88
-  Stripe: offset: 128531 data: 63766 rows: 5000 tail: 79 index: 422
-    Stream: column 0 section ROW_INDEX start: 128531 length 15
-    Stream: column 1 section ROW_INDEX start: 128546 length 153
-    Stream: column 2 section ROW_INDEX start: 128699 length 169
-    Stream: column 3 section ROW_INDEX start: 128868 length 85
-    Stream: column 1 section DATA start: 128953 length 20029
-    Stream: column 2 section DATA start: 148982 length 40035
-    Stream: column 3 section DATA start: 189017 length 3544
-    Stream: column 3 section LENGTH start: 192561 length 25
-    Stream: column 3 section DICTIONARY_DATA start: 192586 length 133
+      Entry 4: count: 1000 min: Darkness, max: worst positions: 0,2978,88
+  Stripe: offset: 128529 data: 63766 rows: 5000 tail: 79 index: 422
+    Stream: column 0 section ROW_INDEX start: 128529 length 15
+    Stream: column 1 section ROW_INDEX start: 128544 length 153
+    Stream: column 2 section ROW_INDEX start: 128697 length 169
+    Stream: column 3 section ROW_INDEX start: 128866 length 85
+    Stream: column 1 section DATA start: 128951 length 20029
+    Stream: column 2 section DATA start: 148980 length 40035
+    Stream: column 3 section DATA start: 189015 length 3544
+    Stream: column 3 section LENGTH start: 192559 length 25
+    Stream: column 3 section DICTIONARY_DATA start: 192584 length 133
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -134,16 +134,16 @@ Stripes:
       Entry 2: count: 1000 min: Darkness, max: worst positions: 0,1469,69
       Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2133,194
       Entry 4: count: 1000 min: Darkness, max: worst positions: 0,3005,43
-  Stripe: offset: 192798 data: 63796 rows: 5000 tail: 79 index: 425
-    Stream: column 0 section ROW_INDEX start: 192798 length 15
-    Stream: column 1 section ROW_INDEX start: 192813 length 156
-    Stream: column 2 section ROW_INDEX start: 192969 length 168
-    Stream: column 3 section ROW_INDEX start: 193137 length 86
-    Stream: column 1 section DATA start: 193223 length 20029
-    Stream: column 2 section DATA start: 213252 length 40035
-    Stream: column 3 section DATA start: 253287 length 3574
-    Stream: column 3 section LENGTH start: 256861 length 25
-    Stream: column 3 section DICTIONARY_DATA start: 256886 length 133
+  Stripe: offset: 192796 data: 63796 rows: 5000 tail: 79 index: 425
+    Stream: column 0 section ROW_INDEX start: 192796 length 15
+    Stream: column 1 section ROW_INDEX start: 192811 length 156
+    Stream: column 2 section ROW_INDEX start: 192967 length 168
+    Stream: column 3 section ROW_INDEX start: 193135 length 86
+    Stream: column 1 section DATA start: 193221 length 20029
+    Stream: column 2 section DATA start: 213250 length 40035
+    Stream: column 3 section DATA start: 253285 length 3574
+    Stream: column 3 section LENGTH start: 256859 length 25
+    Stream: column 3 section DICTIONARY_DATA start: 256884 length 133
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -166,16 +166,16 @@ Stripes:
       Entry 2: count: 1000 min: Darkness, max: worst positions: 0,1485,52
       Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2196,104
       Entry 4: count: 1000 min: Darkness, max: worst positions: 0,2934,131
-  Stripe: offset: 257098 data: 12940 rows: 1000 tail: 71 index: 123
-    Stream: column 0 section ROW_INDEX start: 257098 length 10
-    Stream: column 1 section ROW_INDEX start: 257108 length 36
-    Stream: column 2 section ROW_INDEX start: 257144 length 39
-    Stream: column 3 section ROW_INDEX start: 257183 length 38
-    Stream: column 1 section DATA start: 257221 length 4007
-    Stream: column 2 section DATA start: 261228 length 8007
-    Stream: column 3 section DATA start: 269235 length 768
-    Stream: column 3 section LENGTH start: 270003 length 25
-    Stream: column 3 section DICTIONARY_DATA start: 270028 length 133
+  Stripe: offset: 257096 data: 12940 rows: 1000 tail: 71 index: 123
+    Stream: column 0 section ROW_INDEX start: 257096 length 10
+    Stream: column 1 section ROW_INDEX start: 257106 length 36
+    Stream: column 2 section ROW_INDEX start: 257142 length 39
+    Stream: column 3 section ROW_INDEX start: 257181 length 38
+    Stream: column 1 section DATA start: 257219 length 4007
+    Stream: column 2 section DATA start: 261226 length 8007
+    Stream: column 3 section DATA start: 269233 length 768
+    Stream: column 3 section LENGTH start: 270001 length 25
+    Stream: column 3 section DICTIONARY_DATA start: 270026 length 133
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -187,6 +187,6 @@ Stripes:
     Row group index column 3:
       Entry 0: count: 1000 min: Darkness, max: worst positions: 0,0,0
 
-File length: 270759 bytes
+File length: 270760 bytes
 Padding length: 0 bytes
 Padding ratio: 0%

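The ORC dump golden files above pin down exact byte layouts, so a one- or two-byte change in what the ORC writer emits (note the stripe offset moving from 64276 to 64275, and the file length from 270759 to 270760) ripples through every subsequent stream start recorded in the expected output. These .out files are the verbatim output of Hive's ORC file dump utility; assuming the standard CLI entry point, a dump of this shape can be reproduced with:

    hive --orcfiledump <path-to-orc-file>
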
Modified: hive/branches/spark/ql/src/test/results/clientnegative/acid_overwrite.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/acid_overwrite.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/acid_overwrite.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/acid_overwrite.q.out Mon Oct  6 04:00:39 2014
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_uanp
-POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_uanp

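The ACID test tables in these golden files are now created with the transactional table property, reflecting that ACID DML is only accepted on tables explicitly marked transactional. A minimal sketch of the pattern these updated tests use (hypothetical table name):

    create table acid_demo (a int, b varchar(128))
      clustered by (a) into 2 buckets
      stored as orc TBLPROPERTIES ('transactional'='true');
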
Modified: hive/branches/spark/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out Mon Oct  6 04:00:39 2014
@@ -27,4 +27,4 @@ PREHOOK: query: ALTER TABLE part_whiteli
 PREHOOK: type: ALTERTABLE_RENAMEPART
 PREHOOK: Input: default@part_whitelist_test
 PREHOOK: Output: default@part_whitelist_test@ds=1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'.  (configure with hive.metastore.partition.name.whitelist.pattern)

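The rename now fails with the full diagnostic, naming the offending partition value and the active whitelist regex instead of a bare "Unable to rename partition." The pattern comes from hive.metastore.partition.name.whitelist.pattern, which this test presumably pins to a value like the following (a sketch; the regex is taken from the error text above and rejects commas):

    set hive.metastore.partition.name.whitelist.pattern=[\\x20-\\x7E&&[^,]]*;
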
Modified: hive/branches/spark/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out Mon Oct  6 04:00:39 2014
@@ -35,4 +35,4 @@ PREHOOK: query: alter table alter_rename
 PREHOOK: type: ALTERTABLE_RENAMEPART
 PREHOOK: Input: default@alter_rename_partition
 PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. Partition already exists:default.alter_rename_partition.[old_part1:, old_part2:]

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_delete_nodeletepriv.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_delete_nodeletepriv.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_delete_nodeletepriv.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_delete_nodeletepriv.q.out Mon Oct  6 04:00:39 2014
@@ -1,10 +1,10 @@
 PREHOOK: query: -- check update without update priv
-create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc
+create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@auth_nodel
 POSTHOOK: query: -- check update without update priv
-create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc
+create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@auth_nodel

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out Mon Oct  6 04:00:39 2014
@@ -1,10 +1,10 @@
 PREHOOK: query: -- check update without update priv
-create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc
+create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@auth_noupd
 POSTHOOK: query: -- check update without update priv
-create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc
+create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@auth_noupd

Modified: hive/branches/spark/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out Mon Oct  6 04:00:39 2014
@@ -107,4 +107,5 @@ ALTER TABLE test_table123 REPLACE COLUMN
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@test_table123
 PREHOOK: Output: default@test_table123
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+b

Modified: hive/branches/spark/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out Mon Oct  6 04:00:39 2014
@@ -40,4 +40,5 @@ PREHOOK: query: ALTER TABLE test_table12
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@test_table123
 PREHOOK: Output: default@test_table123
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+b

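Both disallow_incompatible_type_change tests exercise the guard against lossy column type changes, and the error now lists the offending columns rather than a bare "Unable to alter table." A hedged sketch of the kind of statement that trips it, assuming the guard is controlled by hive.metastore.disallow.incompatible.col.type.changes and that b starts out as a string column (the column list here is hypothetical):

    set hive.metastore.disallow.incompatible.col.type.changes=true;
    -- rejected: int cannot safely replace the existing string column b
    ALTER TABLE test_table123 REPLACE COLUMNS (a int, b int);
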
Modified: hive/branches/spark/ql/src/test/results/clientnegative/limit_partition_stats.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/limit_partition_stats.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/limit_partition_stats.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/limit_partition_stats.q.out Mon Oct  6 04:00:39 2014
@@ -43,9 +43,11 @@ STAGE PLANS:
 
 PREHOOK: query: select count(*) from part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
 #### A masked pattern was here ####
 2000
 PREHOOK: query: explain select count(*) from part
@@ -62,31 +64,31 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: part
-            Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

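In the updated plan, the operator annotations flip from "Column stats: COMPLETE" to "Column stats: NONE": basic stats (row count, data size) are still available to the optimizer, but column-level statistics are no longer fetched for this query. Whether column stats are pulled from the metastore during plan annotation is governed by a config knob; a sketch, assuming the usual property name:

    set hive.stats.fetch.column.stats=true;
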
Modified: hive/branches/spark/ql/src/test/results/clientnegative/temp_table_rename.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/temp_table_rename.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/temp_table_rename.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/temp_table_rename.q.out Mon Oct  6 04:00:39 2014
@@ -18,4 +18,4 @@ PREHOOK: query: alter table tmp2 rename 
 PREHOOK: type: ALTERTABLE_RENAME
 PREHOOK: Input: default@tmp2
 PREHOOK: Output: default@tmp2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Cannot rename temporary table to tmp1 - temporary table already exists with the same name

Modified: hive/branches/spark/ql/src/test/results/clientnegative/udf_local_resource.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/udf_local_resource.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/udf_local_resource.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/udf_local_resource.q.out Mon Oct  6 04:00:39 2014
@@ -1,5 +1,6 @@
 PREHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file '../../data/files/sales.txt'
 PREHOOK: type: CREATEFUNCTION
+#### A masked pattern was here ####
 PREHOOK: Output: database:default
 PREHOOK: Output: default.lookup
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. Hive warehouse is non-local, but ../../data/files/sales.txt specifies file on local filesystem. Resources on non-local warehouse should specify a non-local scheme/path

Modified: hive/branches/spark/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out Mon Oct  6 04:00:39 2014
@@ -2,5 +2,6 @@ PREHOOK: query: create function lookup a
 PREHOOK: type: CREATEFUNCTION
 PREHOOK: Output: database:default
 PREHOOK: Output: default.lookup
+PREHOOK: Output: nonexistent_file.txt
 nonexistent_file.txt does not exist
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. nonexistent_file.txt does not exist

Modified: hive/branches/spark/ql/src/test/results/clientnegative/update_partition_col.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/update_partition_col.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/update_partition_col.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/update_partition_col.q.out Mon Oct  6 04:00:39 2014
@@ -1,8 +1,8 @@
-PREHOOK: query: create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@foo
-POSTHOOK: query: create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@foo

Modified: hive/branches/spark/ql/src/test/results/clientpositive/acid_vectorization.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/acid_vectorization.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/acid_vectorization.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/acid_vectorization.q.out Mon Oct  6 04:00:39 2014
@@ -1,8 +1,8 @@
-PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC
+PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_vectorized
-POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC
+POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_vectorized

Modified: hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out Mon Oct  6 04:00:39 2014
@@ -65,9 +65,11 @@ value               	string             
 #### A masked pattern was here ####
 PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_stat
 PREHOOK: Output: default@src_orc_merge_test_stat
 POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_stat
 POSTHOOK: Output: default@src_orc_merge_test_stat
 PREHOOK: query: desc formatted  src_orc_merge_test_stat
 PREHOOK: type: DESCTABLE
@@ -115,9 +117,11 @@ POSTHOOK: Input: default@src_orc_merge_t
 POSTHOOK: Output: default@src_orc_merge_test_stat
 PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_stat
 PREHOOK: Output: default@src_orc_merge_test_stat
 POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_stat
 POSTHOOK: Output: default@src_orc_merge_test_stat
 PREHOOK: query: desc formatted src_orc_merge_test_stat
 PREHOOK: type: DESCTABLE
@@ -243,8 +247,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
-	numRows             	500                 
-	rawDataSize         	47000               
+	numRows             	1500                
+	rawDataSize         	141000              
 	totalSize           	7488                
 #### A masked pattern was here ####
 	 	 
@@ -260,10 +264,12 @@ Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part_stat
 PREHOOK: Output: default@src_orc_merge_test_part_stat
 PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
@@ -317,10 +323,12 @@ POSTHOOK: Input: default@src_orc_merge_t
 POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part_stat
 PREHOOK: Output: default@src_orc_merge_test_part_stat
 PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')

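The noscan analyze statements now record the analyzed table as a PREHOOK/POSTHOOK Input in addition to an Output, which fits how noscan works: stats come from file metadata (and, for ORC, presumably the file footers, which would explain numRows rising from 500 to 1500 once the partition holds three merged files) rather than from a row scan. The statement itself, as it appears in the test:

    analyze table src_orc_merge_test_part_stat partition (ds='2011') compute statistics noscan;
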
Modified: hive/branches/spark/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out Mon Oct  6 04:00:39 2014
@@ -358,11 +358,11 @@ Table:              	tst1               
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	false               
+	COLUMN_STATS_ACCURATE	true                
 #### A masked pattern was here ####
 	numFiles            	12                  
-	numRows             	-1                  
-	rawDataSize         	-1                  
+	numRows             	500                 
+	rawDataSize         	5312                
 	totalSize           	5812                
 #### A masked pattern was here ####
 	 	 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/alter_partition_coltype.q.out?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/alter_partition_coltype.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/alter_partition_coltype.q.out Mon Oct  6 04:00:39 2014 differ