Posted to commits@hive.apache.org by xu...@apache.org on 2015/11/12 04:59:25 UTC

[01/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Repository: hive
Updated Branches:
  refs/heads/spark b02cd4abc -> cad0ea6a9
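
The hunks below all show the same effect of HIVE-12063: decimal results that Hive previously printed with trailing zeros stripped (14, 0.123456789) are now padded out to the declared scale of the column (14.00 for a scale-2 column, 0.1234567890 for a scale-10 column). As a minimal sketch of that rendering rule -- using java.math.BigDecimal rather than Hive's own HiveDecimal internals, with the hypothetical helper name padToColumnScale -- not the patch's actual code:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalPadDemo {
        // Hypothetical helper: render a value with exactly the column's
        // declared scale. RoundingMode.UNNECESSARY never changes the value;
        // it only appends trailing zeros (and throws if a digit would be lost).
        static String padToColumnScale(BigDecimal v, int columnScale) {
            return v.setScale(columnScale, RoundingMode.UNNECESSARY).toPlainString();
        }

        public static void main(String[] args) {
            System.out.println(padToColumnScale(new BigDecimal("14"), 2));           // 14.00
            System.out.println(padToColumnScale(new BigDecimal("0.123456789"), 10)); // 0.1234567890
        }
    }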


http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
index cf975d1..9b9fb71 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
@@ -110,14 +110,14 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	6	6984454.211097692	-4033.445769230769	6967702.8672438458471
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.746153846154	-11712.276923076923	12625.04759999997746
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	3	6984454.211097692	-617.5607769230769	6983219.0895438458462
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2	6984454.211097692	1833.9456923076925	6986288.1567899996925
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 PREHOOK: query: -- Now add the others...
 EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
@@ -208,11 +208,11 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.211097692	-4033.445769230769	6967702.8672438458471	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	-1941.6364148649	1426.0153418918999	2016.6902366556308	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459	-2325.50327307692295	1707.9424961538462	2415.395441814127
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.746153846154	-11712.276923076923	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.211097692	-617.5607769230769	6983219.0895438458462	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2916536.7443268917	2915005.5249214866	4122440.3477364695	2	6984454.211097692	1833.9456923076925	6986288.1567899996925	3493144.07839499984625	3491310.1327026924	4937458.140118758
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536	1956.576923076922966667	6821.495748565159	6822.606289190924
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	-1941.63641486490000	1426.0153418918999	2016.6902366556308	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590	-2325.503273076922950000	1707.9424961538462	2415.395441814127
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2916536.74432689170000	2915005.5249214866	4122440.3477364695	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250	3493144.078394999846250000	3491310.1327026924	4937458.140118758
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360	1956.576923076922966667	6821.495748565159	6822.606289190924

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
index 88c09d9..2d81305 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
@@ -46,13 +46,13 @@ POSTHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS D
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--13326.0	528534767	true	1969-12-31 15:59:46.674	-13326	528534767	1	-13
--15813.0	528534767	true	1969-12-31 15:59:55.787	-15813	528534767	1	-4
--9566.0	528534767	true	1969-12-31 15:59:44.187	-9566	528534767	1	-16
-15007.0	528534767	true	1969-12-31 15:59:50.434	15007	528534767	1	-10
-7021.0	528534767	true	1969-12-31 16:00:15.007	7021	528534767	1	15
-4963.0	528534767	true	1969-12-31 16:00:07.021	4963	528534767	1	7
--7824.0	528534767	true	1969-12-31 16:00:04.963	-7824	528534767	1	5
--15431.0	528534767	true	1969-12-31 15:59:52.176	-15431	528534767	1	-8
--15549.0	528534767	true	1969-12-31 15:59:44.569	-15549	528534767	1	-15
-5780.0	528534767	true	1969-12-31 15:59:44.451	5780	528534767	1	-16
+-13326.0	528534767	true	1969-12-31 15:59:46.674	-13326.0000000000	528534767.00000000000000	1.00	-13
+-15813.0	528534767	true	1969-12-31 15:59:55.787	-15813.0000000000	528534767.00000000000000	1.00	-4
+-9566.0	528534767	true	1969-12-31 15:59:44.187	-9566.0000000000	528534767.00000000000000	1.00	-16
+15007.0	528534767	true	1969-12-31 15:59:50.434	15007.0000000000	528534767.00000000000000	1.00	-10
+7021.0	528534767	true	1969-12-31 16:00:15.007	7021.0000000000	528534767.00000000000000	1.00	15
+4963.0	528534767	true	1969-12-31 16:00:07.021	4963.0000000000	528534767.00000000000000	1.00	7
+-7824.0	528534767	true	1969-12-31 16:00:04.963	-7824.0000000000	528534767.00000000000000	1.00	5
+-15431.0	528534767	true	1969-12-31 15:59:52.176	-15431.0000000000	528534767.00000000000000	1.00	-8
+-15549.0	528534767	true	1969-12-31 15:59:44.569	-15549.0000000000	528534767.00000000000000	1.00	-15
+5780.0	528534767	true	1969-12-31 15:59:44.451	5780.0000000000	528534767.00000000000000	1.00	-16

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index 6369bc8..e57d6c1 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -77,13 +77,13 @@ LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_test
 #### A masked pattern was here ####
-1836.441995841977	-1166.027234927254	0.8372697814833714	245972.55810810256	5.6189189189	835	1000	-24	835	true	1000.823076923077	835.6189	1000.823076923077	1969-12-31 16:13:55.618918918
-1856.1322245322462	-1178.5293139292924	0.8372449787014038	251275.4432432497	4.5783783784	844	1011	-13	844	true	1011.5538461538462	844.57837	1011.5538461538462	1969-12-31 16:14:04.578378378
-1858.7575883576155	-1180.196257796231	0.837241711366943	251986.76756757565	5.772972973	845	1012	-12	845	true	1012.9846153846155	845.77295	1012.9846153846155	1969-12-31 16:14:05.772972973
-1862.6956340956693	-1182.6966735966386	0.8372368276344616	253055.6391891997	7.5648648649	847	1015	-9	847	true	1015.1307692307693	847.5649	1015.1307692307693	1969-12-31 16:14:07.564864864
-1883.6985446985233	-1196.0322245322466	0.8372111259286499	258794.49324323673	7.1216216216	857	1026	2	857	true	1026.5769230769233	857.12164	1026.5769230769233	1969-12-31 16:14:17.121621621
-1886.3239085238924	-1197.6991683991848	0.8372079534581902	259516.37432431948	8.3162162162	858	1028	4	858	true	1028.0076923076924	858.3162	1028.0076923076924	1969-12-31 16:14:18.316216216
-1887.636590436577	-1198.532640332654	0.8372063705322131	259877.69189188787	8.9135135135	858	1028	4	858	true	1028.723076923077	858.9135	1028.723076923077	1969-12-31 16:14:18.913513513
-1895.5126819126846	-1203.5334719334692	0.8371969190171343	262050.87567567648	2.4972972973	862	1033	9	862	true	1033.0153846153846	862.4973	1033.0153846153846	1969-12-31 16:14:22.497297297
-1909.9521829522155	-1212.701663201631	0.8371797936946236	266058.54729730723	9.0675675676	869	1040	16	869	true	1040.8846153846155	869.06757	1040.8846153846155	1969-12-31 16:14:29.067567567
-1913.8902286902692	-1215.2020790020384	0.8371751679995797	267156.8270270395	0.8594594595	870	1043	19	870	true	1043.0307692307692	870.85944	1043.0307692307692	1969-12-31 16:14:30.859459459
+1836.44199584197700	-1166.02723492725400	0.8372697814833714	245972.55810810256	5.6189189189	835	1000	-24	835	true	1000.823076923077	835.6189	1000.823076923077	1969-12-31 16:13:55.618918918
+1856.13222453224620	-1178.52931392929240	0.8372449787014038	251275.4432432497	4.5783783784	844	1011	-13	844	true	1011.5538461538462	844.57837	1011.5538461538462	1969-12-31 16:14:04.578378378
+1858.75758835761550	-1180.19625779623100	0.837241711366943	251986.76756757565	5.7729729730	845	1012	-12	845	true	1012.9846153846155	845.77295	1012.9846153846155	1969-12-31 16:14:05.772972973
+1862.69563409566930	-1182.69667359663860	0.8372368276344616	253055.6391891997	7.5648648649	847	1015	-9	847	true	1015.1307692307693	847.5649	1015.1307692307693	1969-12-31 16:14:07.564864864
+1883.69854469852330	-1196.03222453224660	0.8372111259286499	258794.49324323673	7.1216216216	857	1026	2	857	true	1026.5769230769233	857.12164	1026.5769230769233	1969-12-31 16:14:17.121621621
+1886.32390852389240	-1197.69916839918480	0.8372079534581902	259516.37432431948	8.3162162162	858	1028	4	858	true	1028.0076923076924	858.3162	1028.0076923076924	1969-12-31 16:14:18.316216216
+1887.63659043657700	-1198.53264033265400	0.8372063705322131	259877.69189188787	8.9135135135	858	1028	4	858	true	1028.723076923077	858.9135	1028.723076923077	1969-12-31 16:14:18.913513513
+1895.51268191268460	-1203.53347193346920	0.8371969190171343	262050.87567567648	2.4972972973	862	1033	9	862	true	1033.0153846153846	862.4973	1033.0153846153846	1969-12-31 16:14:22.497297297
+1909.95218295221550	-1212.70166320163100	0.8371797936946236	266058.54729730723	9.0675675676	869	1040	16	869	true	1040.8846153846155	869.06757	1040.8846153846155	1969-12-31 16:14:29.067567567
+1913.89022869026920	-1215.20207900203840	0.8371751679995797	267156.8270270395	0.8594594595	870	1043	19	870	true	1043.0307692307692	870.85944	1043.0307692307692	1969-12-31 16:14:30.859459459

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
index cf48a32..d3356ed 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
@@ -155,109 +155,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
index f2aaf8d..c5ab8a7 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
@@ -99,13 +99,13 @@ NULL
 NULL
 NULL
 NULL
-0
-0
-0
-0
-0
-0.123456789
-0.123456789
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1234567890
+0.1234567890
 1.2345678901
 1.2345678901
 1.2345678901
@@ -122,14 +122,14 @@ NULL
 12345.6789012346
 123456.7890123456
 123456.7890123457
-1234567.890123456
+1234567.8901234560
 1234567.8901234568
-12345678.90123456
+12345678.9012345600
 12345678.9012345679
-123456789.0123456
+123456789.0123456000
 123456789.0123456789
-1234567890.123456
-1234567890.123456789
+1234567890.1234560000
+1234567890.1234567890
 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -182,13 +182,13 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0.123456789	1.123456789	-0.876543211
-0.123456789	1.123456789	-0.876543211
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.1234567890	1.1234567890	-0.8765432110
+0.1234567890	1.1234567890	-0.8765432110
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
@@ -205,14 +205,14 @@ NULL	NULL	NULL
 12345.6789012346	12346.6789012346	12344.6789012346
 123456.7890123456	123457.7890123456	123455.7890123456
 123456.7890123457	123457.7890123457	123455.7890123457
-1234567.890123456	1234568.890123456	1234566.890123456
+1234567.8901234560	1234568.8901234560	1234566.8901234560
 1234567.8901234568	1234568.8901234568	1234566.8901234568
-12345678.90123456	12345679.90123456	12345677.90123456
+12345678.9012345600	12345679.9012345600	12345677.9012345600
 12345678.9012345679	12345679.9012345679	12345677.9012345679
-123456789.0123456	123456790.0123456	123456788.0123456
+123456789.0123456000	123456790.0123456000	123456788.0123456000
 123456789.0123456789	123456790.0123456789	123456788.0123456789
-1234567890.123456	1234567891.123456	1234567889.123456
-1234567890.123456789	1234567891.123456789	1234567889.123456789
+1234567890.1234560000	1234567891.1234560000	1234567889.1234560000
+1234567890.1234567890	1234567891.1234567890	1234567889.1234567890
 PREHOOK: query: SELECT dec, dec * 2, dec / 3  FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -265,37 +265,37 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0.123456789	0.246913578	0.041152263
-0.123456789	0.246913578	0.041152263
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.1234567890	0.2469135780	0.041152263000
+0.1234567890	0.2469135780	0.041152263000
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
 12345.6789012346	24691.3578024692	4115.226300411533
 12345.6789012346	24691.3578024692	4115.226300411533
-123456.7890123456	246913.5780246912	41152.2630041152
+123456.7890123456	246913.5780246912	41152.263004115200
 123456.7890123457	246913.5780246914	41152.263004115233
-1234567.890123456	2469135.780246912	411522.630041152
+1234567.8901234560	2469135.7802469120	411522.630041152000
 1234567.8901234568	2469135.7802469136	411522.630041152267
-12345678.90123456	24691357.80246912	4115226.30041152
+12345678.9012345600	24691357.8024691200	4115226.300411520000
 12345678.9012345679	24691357.8024691358	4115226.300411522633
-123456789.0123456	246913578.0246912	41152263.0041152
-123456789.0123456789	246913578.0246913578	41152263.0041152263
-1234567890.123456	2469135780.246912	411522630.041152
-1234567890.123456789	2469135780.246913578	411522630.041152263
+123456789.0123456000	246913578.0246912000	41152263.004115200000
+123456789.0123456789	246913578.0246913578	41152263.004115226300
+1234567890.1234560000	2469135780.2469120000	411522630.041152000000
+1234567890.1234567890	2469135780.2469135780	411522630.041152263000
 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -348,13 +348,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.013717421
-0.123456789	0.013717421
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.1234567890	0.013717421000
+0.1234567890	0.013717421000
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
@@ -371,14 +371,14 @@ NULL	NULL
 12345.6789012346	1371.742100137178
 123456.7890123456	13717.421001371733
 123456.7890123457	13717.421001371744
-1234567.890123456	137174.210013717333
+1234567.8901234560	137174.210013717333
 1234567.8901234568	137174.210013717422
-12345678.90123456	1371742.100137173333
+12345678.9012345600	1371742.100137173333
 12345678.9012345679	1371742.100137174211
-123456789.0123456	13717421.001371733333
-123456789.0123456789	13717421.0013717421
-1234567890.123456	137174210.013717333333
-1234567890.123456789	137174210.013717421
+123456789.0123456000	13717421.001371733333
+123456789.0123456789	13717421.001371742100
+1234567890.1234560000	137174210.013717333333
+1234567890.1234567890	137174210.013717421000
 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -431,13 +431,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.0045724736667
-0.123456789	0.0045724736667
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.1234567890	0.0045724736667
+0.1234567890	0.0045724736667
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
@@ -454,14 +454,14 @@ NULL	NULL
 12345.6789012346	457.2473667123926
 123456.7890123456	4572.4736671239111
 123456.7890123457	4572.4736671239148
-1234567.890123456	45724.7366712391111
+1234567.8901234560	45724.7366712391111
 1234567.8901234568	45724.7366712391407
-12345678.90123456	457247.3667123911111
+12345678.9012345600	457247.3667123911111
 12345678.9012345679	457247.3667123914037
-123456789.0123456	4572473.6671239111111
+123456789.0123456000	4572473.6671239111111
 123456789.0123456789	4572473.6671239140333
-1234567890.123456	45724736.6712391111111
-1234567890.123456789	45724736.6712391403333
+1234567890.1234560000	45724736.6712391111111
+1234567890.1234567890	45724736.6712391403333
 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -514,13 +514,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.015241578750190521
-0.123456789	0.015241578750190521
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.1234567890	0.01524157875019052100
+0.1234567890	0.01524157875019052100
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
@@ -537,14 +537,14 @@ NULL	NULL
 12345.6789012346	152415787.53238916034140423716
 123456.7890123456	15241578753.23881726870921383936
 123456.7890123457	15241578753.23884196006701630849
-1234567.890123456	1524157875323.881726870921383936
+1234567.8901234560	1524157875323.88172687092138393600
 1234567.8901234568	1524157875323.88370217954558146624
-12345678.90123456	152415787532388.1726870921383936
+12345678.9012345600	152415787532388.17268709213839360000
 12345678.9012345679	152415787532388.36774881877789971041
-123456789.0123456	15241578753238817.26870921383936
+123456789.0123456000	15241578753238817.26870921383936000000
 123456789.0123456789	15241578753238836.75019051998750190521
-1234567890.123456	NULL
-1234567890.123456789	NULL
+1234567890.1234560000	NULL
+1234567890.1234567890	NULL
 PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
@@ -637,7 +637,7 @@ POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_precision
 #### A masked pattern was here ####
-12345678901234567890.12345678
+12345678901234567890.123456780000000000
 PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
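
The NULLs in the last two dec * dec rows above are a precision limit, not a padding artifact: with dec declared as decimal(20,10), the product carries scale 10 + 10 = 20, and the ten-integer-digit operands produce a value needing 39 significant digits, one more than Hive's 38-digit decimal maximum. A rough illustration of that overflow check, again with BigDecimal standing in for Hive's internals:

    import java.math.BigDecimal;

    public class DecimalOverflowDemo {
        static final int MAX_PRECISION = 38; // Hive's decimal digit limit

        // Sketch: multiply two scale-10 decimals; the product has scale 20
        // and is treated as null once it needs more than 38 total digits.
        static BigDecimal multiply(BigDecimal a, BigDecimal b) {
            BigDecimal product = a.multiply(b);
            return product.precision() > MAX_PRECISION ? null : product;
        }

        public static void main(String[] args) {
            BigDecimal nine = new BigDecimal("123456789.0123456000");  // 9 integer digits
            BigDecimal ten  = new BigDecimal("1234567890.1234560000"); // 10 integer digits
            System.out.println(multiply(nine, nine)); // 37 digits: fits
            System.out.println(multiply(ten, ten));   // 39 digits: null
        }
    }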

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
index 0151b04..5291609 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
@@ -25,7 +25,7 @@ POSTHOOK: query: select * from decimal_tbl_1_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_1_orc
 #### A masked pattern was here ####
-55555
+55555.000000000000000000
 PREHOOK: query: -- EXPLAIN
 -- SELECT dec, round(null), round(null, 0), round(125, null), 
 -- round(1.0/0.0, 0), round(power(-1.0,0.5), 0)
@@ -114,7 +114,7 @@ FROM decimal_tbl_1_orc ORDER BY d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_1_orc
 #### A masked pattern was here ####
-55555	55555	55555	55555	55555	55560	55600	56000	60000	100000	0	0	0
+55555	55555	55555.0	55555.00	55555.000	55560	55600	56000	60000	100000	0	0	0
 PREHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -143,7 +143,7 @@ POSTHOOK: query: select * from decimal_tbl_2_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
-125.315	-125.315
+125.315000000000000000	-125.315000000000000000
 PREHOOK: query: EXPLAIN
 SELECT
   round(pos) as p, round(pos, 0),
@@ -226,7 +226,7 @@ FROM decimal_tbl_2_orc ORDER BY p
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
-125	125	125.3	125.32	125.315	125.315	130	100	0	0	-125	-125	-125.3	-125.32	-125.315	-125.315	-130	-100	0	0
+125	125	125.3	125.32	125.315	125.3150	130	100	0	0	-125	-125	-125.3	-125.32	-125.315	-125.3150	-130	-100	0	0
 PREHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -254,7 +254,7 @@ POSTHOOK: query: select * from decimal_tbl_3_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
-3.141592653589793
+3.141592653589793000
 PREHOOK: query: EXPLAIN
 SELECT
   round(dec, -15) as d, round(dec, -16),
@@ -381,7 +381,7 @@ FROM decimal_tbl_3_orc ORDER BY d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
-0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	3	3.1	3.14	3.142	3.1416	3.14159	3.141593	3.1415927	3.14159265	3.141592654	3.1415926536	3.14159265359	3.14159265359	3.1415926535898	3.1415926535898	3.14159265358979	3.141592653589793	3.141592653589793
+0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	3	3.1	3.14	3.142	3.1416	3.14159	3.141593	3.1415927	3.14159265	3.141592654	3.1415926536	3.14159265359	3.141592653590	3.1415926535898	3.1415926535898	3.14159265358979	3.141592653589793	3.1415926535897930
 PREHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -410,7 +410,7 @@ POSTHOOK: query: select * from decimal_tbl_4_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_4_orc
 #### A masked pattern was here ####
-1809242.3151111344	-1809242.3151111344
+1809242.315111134400000000	-1809242.315111134400000000
 PREHOOK: query: EXPLAIN
 SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9)
 FROM decimal_tbl_4_orc ORDER BY p
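
The same padding applies to ROUND in the hunks above: round(x, k) returns a decimal of scale k, so the result now prints with exactly k fractional digits (55555.000 for k = 3, and 3.14159265359 becoming 3.141592653590 at k = 12). A minimal sketch, assuming the half-up rounding ROUND uses:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalRoundDemo {
        // Sketch of round(x, k) as a scale-k decimal: round half-up,
        // then render with exactly k fractional digits.
        static String roundToScale(BigDecimal x, int k) {
            return x.setScale(k, RoundingMode.HALF_UP).toPlainString();
        }

        public static void main(String[] args) {
            System.out.println(roundToScale(new BigDecimal("55555"), 3));              // 55555.000
            System.out.println(roundToScale(new BigDecimal("3.141592653589793"), 12)); // 3.141592653590
        }
    }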

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out b/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
index ffdb1c9..7dea1a2 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
@@ -73,16 +73,16 @@ POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_trailing
 #### A masked pattern was here ####
-0	0	0
-1	0	0
+0	0.0000	0.00000000
+1	0.0000	0.00000000
 2	NULL	NULL
-3	1	1
-4	10	10
-5	100	100
-6	1000	1000
-7	10000	10000
-8	100000	100000
-9	NULL	1000000
+3	1.0000	1.00000000
+4	10.0000	10.00000000
+5	100.0000	100.00000000
+6	1000.0000	1000.00000000
+7	10000.0000	10000.00000000
+8	100000.0000	100000.00000000
+9	NULL	1000000.00000000
 10	NULL	NULL
 11	NULL	NULL
 12	NULL	NULL
@@ -91,18 +91,18 @@ POSTHOOK: Input: default@decimal_trailing
 15	NULL	NULL
 16	NULL	NULL
 17	NULL	NULL
-18	1	1
-19	10	10
-20	100	100
-21	1000	1000
-22	100000	10000
-23	0	0
-24	0	0
-25	0	0
-26	0	0
-27	0	0
-28	12313.2	134134.312525
-29	99999.999	134134.31242553
+18	1.0000	1.00000000
+19	10.0000	10.00000000
+20	100.0000	100.00000000
+21	1000.0000	1000.00000000
+22	100000.0000	10000.00000000
+23	0.0000	0.00000000
+24	0.0000	0.00000000
+25	0.0000	0.00000000
+26	0.0000	0.00000000
+27	0.0000	0.00000000
+28	12313.2000	134134.31252500
+29	99999.9990	134134.31242553
 PREHOOK: query: DROP TABLE DECIMAL_TRAILING_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_trailing_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
index cfd2a55..6837b76 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
@@ -92,44 +92,44 @@ POSTHOOK: query: SELECT key + key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-200
-20
-2
-0.2
-0.02
-400
-40
-4
-0
-0.4
-0.04
-0.6
-0.66
-0.666
--0.6
--0.66
--0.666
-2
-4
-6.28
--2.24
--2.24
--2.244
-2.24
-2.244
-248
-250.4
--2510.98
-6.28
-6.28
-6.28
-2
--2469135780.246913578
-2469135780.24691356
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.2000000000
+0.0200000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.4000000000
+0.0400000000
+0.6000000000
+0.6600000000
+0.6660000000
+-0.6000000000
+-0.6600000000
+-0.6660000000
+2.0000000000
+4.0000000000
+6.2800000000
+-2.2400000000
+-2.2400000000
+-2.2440000000
+2.2400000000
+2.2440000000
+248.0000000000
+250.4000000000
+-2510.9800000000
+6.2800000000
+6.2800000000
+6.2800000000
+2.0000000000
+-2469135780.2469135780
+2469135780.2469135600
 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
@@ -172,44 +172,44 @@ POSTHOOK: query: SELECT key + value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-200
-20
-2
-0.1
-0.01
-400
-40
-4
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-2
-4
-6.14
--2.12
--2.12
--12.122
-2.12
-2.122
-248
-250.2
--2510.49
-6.14
-6.14
-7.14
-2
--2469135780.123456789
-2469135780.12345678
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.1000000000
+0.0100000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+2.0000000000
+4.0000000000
+6.1400000000
+-2.1200000000
+-2.1200000000
+-12.1220000000
+2.1200000000
+2.1220000000
+248.0000000000
+250.2000000000
+-2510.4900000000
+6.1400000000
+6.1400000000
+7.1400000000
+2.0000000000
+-2469135780.1234567890
+2469135780.1234567800
 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
@@ -414,44 +414,44 @@ POSTHOOK: query: SELECT key - key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
@@ -494,44 +494,44 @@ POSTHOOK: query: SELECT key - value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-0
-0
-0
-0.1
-0.01
-0
-0
-0
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-0
-0
-0.14
--0.12
--0.12
-9.878
-0.12
-0.122
-0
-0.2
--0.49
-0.14
-0.14
--0.86
-0
--0.123456789
-0.12345678
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1000000000
+0.0100000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.0000000000
+0.0000000000
+0.1400000000
+-0.1200000000
+-0.1200000000
+9.8780000000
+0.1200000000
+0.1220000000
+0.0000000000
+0.2000000000
+-0.4900000000
+0.1400000000
+0.1400000000
+-0.8600000000
+0.0000000000
+-0.1234567890
+0.1234567800
 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
@@ -736,42 +736,42 @@ POSTHOOK: query: SELECT key * key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-19360000
+19360000.00000000000000000000
 NULL
-0
-0
-10000
-100
-1
-0.01
-0.0001
-40000
-400
-4
-0
-0.04
-0.0004
-0.09
-0.1089
-0.110889
-0.09
-0.1089
-0.110889
-1
-4
-9.8596
-1.2544
-1.2544
-1.258884
-1.2544
-1.258884
-15376
-15675.04
-1576255.1401
-9.8596
-9.8596
-9.8596
-1
+0.00000000000000000000
+0.00000000000000000000
+10000.00000000000000000000
+100.00000000000000000000
+1.00000000000000000000
+0.01000000000000000000
+0.00010000000000000000
+40000.00000000000000000000
+400.00000000000000000000
+4.00000000000000000000
+0.00000000000000000000
+0.04000000000000000000
+0.00040000000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+1.00000000000000000000
+4.00000000000000000000
+9.85960000000000000000
+1.25440000000000000000
+1.25440000000000000000
+1.25888400000000000000
+1.25440000000000000000
+1.25888400000000000000
+15376.00000000000000000000
+15675.04000000000000000000
+1576255.14010000000000000000
+9.85960000000000000000
+9.85960000000000000000
+9.85960000000000000000
+1.00000000000000000000
 NULL
 NULL
 PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0
@@ -819,29 +819,29 @@ POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-100	100
-10	10
-1	1
-200	200
-20	20
-2	2
-1	1
-2	2
-3.14	3
--1.12	-1
--1.12	-1
--1.122	-11
-1.12	1
-1.122	1
-124	124
-125.2	125
--1255.49	-1255
-3.14	3
-3.14	3
-3.14	4
-1	1
--1234567890.123456789	-1234567890
-1234567890.12345678	1234567890
+100.0000000000	100
+10.0000000000	10
+1.0000000000	1
+200.0000000000	200
+20.0000000000	20
+2.0000000000	2
+1.0000000000	1
+2.0000000000	2
+3.1400000000	3
+-1.1200000000	-1
+-1.1200000000	-1
+-1.1220000000	-11
+1.1200000000	1
+1.1220000000	1
+124.0000000000	124
+125.2000000000	125
+-1255.4900000000	-1255
+3.1400000000	3
+3.1400000000	3
+3.1400000000	4
+1.0000000000	1
+-1234567890.1234567890	-1234567890
+1234567890.1234567800	1234567890
 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
@@ -884,44 +884,44 @@ POSTHOOK: query: SELECT key * value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--19360000
+-19360000.0000000000
 NULL
-0
-0
-10000
-100
-1
-0
-0
-40000
-400
-4
-0
-0
-0
-0
-0
-0
-0
-0
-0
-1
-4
-9.42
-1.12
-1.12
-12.342
-1.12
-1.122
-15376
-15650
-1575639.95
-9.42
-9.42
-12.56
-1
-1524157875171467887.50190521
-1524157875171467876.3907942
+0.0000000000
+0.0000000000
+10000.0000000000
+100.0000000000
+1.0000000000
+0.0000000000
+0.0000000000
+40000.0000000000
+400.0000000000
+4.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+1.0000000000
+4.0000000000
+9.4200000000
+1.1200000000
+1.1200000000
+12.3420000000
+1.1200000000
+1.1220000000
+15376.0000000000
+15650.0000000000
+1575639.9500000000
+9.4200000000
+9.4200000000
+12.5600000000
+1.0000000000
+1524157875171467887.5019052100
+1524157875171467876.3907942000
 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
@@ -1220,40 +1220,40 @@ POSTHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
 PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
@@ -1299,30 +1299,30 @@ POSTHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1
-1
-1
-1
-1
-1
-1
-1
-1
+-1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
 1.046666666666666666667
-1.12
-1.12
-0.102
-1.12
-1.122
-1
-1.0016
+1.120000000000000000000
+1.120000000000000000000
+0.102000000000000000000
+1.120000000000000000000
+1.122000000000000000000
+1.000000000000000000000
+1.001600000000000000000
 1.000390438247011952191
 1.046666666666666666667
 1.046666666666666666667
-0.785
-1
-1.0000000001
-1.00000000009999999271
+0.785000000000000000000
+1.000000000000000000000
+1.000000000100000000000
+1.000000000099999992710
 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
@@ -1516,44 +1516,44 @@ POSTHOOK: query: SELECT abs(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
-0.3
-0.33
-0.333
-1
-2
-3.14
-1.12
-1.12
-1.122
-1.12
-1.122
-124
-125.2
-1255.49
-3.14
-3.14
-3.14
-1
-1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- avg
 EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
@@ -1643,23 +1643,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789	-1234567890.123456789	-1234567890.123456789
--1255	-1255.49	-1255.49	-1255.49
--11	-1.122	-1.122	-1.122
--1	-1.12	-1.12	-2.24
-0	0.02538461538461538461538	0.02538461538462	0.33
-1	1.0484	1.0484	5.242
-2	2	2	4
-3	3.14	3.14	9.42
-4	3.14	3.14	3.14
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125	125.2	125.2	125.2
-200	200	200	200
-4400	-4400	-4400	-4400
-1234567890	1234567890.12345678	1234567890.12345678	1234567890.12345678
+-1234567890	-1234567890.12345678900000000000000	-1234567890.12345678900000	-1234567890.1234567890
+-1255	-1255.49000000000000000000000	-1255.49000000000000	-1255.4900000000
+-11	-1.12200000000000000000000	-1.12200000000000	-1.1220000000
+-1	-1.12000000000000000000000	-1.12000000000000	-2.2400000000
+0	0.02538461538461538461538	0.02538461538462	0.3300000000
+1	1.04840000000000000000000	1.04840000000000	5.2420000000
+2	2.00000000000000000000000	2.00000000000000	4.0000000000
+3	3.14000000000000000000000	3.14000000000000	9.4200000000
+4	3.14000000000000000000000	3.14000000000000	3.1400000000
+10	10.00000000000000000000000	10.00000000000000	10.0000000000
+20	20.00000000000000000000000	20.00000000000000	20.0000000000
+100	100.00000000000000000000000	100.00000000000000	100.0000000000
+124	124.00000000000000000000000	124.00000000000000	124.0000000000
+125	125.20000000000000000000000	125.20000000000000	125.2000000000
+200	200.00000000000000000000000	200.00000000000000	200.0000000000
+4400	-4400.00000000000000000000000	-4400.00000000000000	-4400.0000000000
+1234567890	1234567890.12345678000000000000000	1234567890.12345678000000	1234567890.1234567800
 PREHOOK: query: -- negative
 EXPLAIN SELECT -key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1704,44 +1704,44 @@ POSTHOOK: query: SELECT -key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
--100
--10
--1
--0.1
--0.01
--200
--20
--2
-0
--0.2
--0.02
--0.3
--0.33
--0.333
-0.3
-0.33
-0.333
--1
--2
--3.14
-1.12
-1.12
-1.122
--1.12
--1.122
--124
--125.2
-1255.49
--3.14
--3.14
--3.14
--1
-1234567890.123456789
--1234567890.12345678
+0.0000000000
+0.0000000000
+-100.0000000000
+-10.0000000000
+-1.0000000000
+-0.1000000000
+-0.0100000000
+-200.0000000000
+-20.0000000000
+-2.0000000000
+0.0000000000
+-0.2000000000
+-0.0200000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+-1.0000000000
+-2.0000000000
+-3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+-1.1200000000
+-1.1220000000
+-124.0000000000
+-125.2000000000
+1255.4900000000
+-3.1400000000
+-3.1400000000
+-3.1400000000
+-1.0000000000
+1234567890.1234567890
+-1234567890.1234567800
 PREHOOK: query: -- positive
 EXPLAIN SELECT +key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1773,44 +1773,44 @@ POSTHOOK: query: SELECT +key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-1
-2
-3.14
--1.12
--1.12
--1.122
-1.12
-1.122
-124
-125.2
--1255.49
-3.14
-3.14
-3.14
-1
--1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+-1.1200000000
+-1.1200000000
+-1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+-1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+-1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- ceiling
 EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2019,42 +2019,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.00
 NULL
-0
-0
-100
-10
-1
-0.1
+0.00
+0.00
+100.00
+10.00
+1.00
+0.10
 0.01
-200
-20
-2
-0
-0.2
+200.00
+20.00
+2.00
+0.00
+0.20
 0.02
-0.3
+0.30
 0.33
 0.33
--0.3
+-0.30
 -0.33
 -0.33
-1
-2
+1.00
+2.00
 3.14
 -1.12
 -1.12
 -1.12
 1.12
 1.12
-124
-125.2
+124.00
+125.20
 -1255.49
 3.14
 3.14
 3.14
-1
+1.00
 -1234567890.12
 1234567890.12
 PREHOOK: query: -- power
@@ -2182,44 +2182,44 @@ POSTHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--2199
+-2199.000000000000
 NULL
 NULL
 NULL
-1
-1
-0
-0
-0
-1
-1
-0
+1.000000000000
+1.000000000000
+0.000000000000
+0.000000000000
+0.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
 NULL
-0
-0
-0.1
-0.01
-0.001
-0.1
-0.01
-0.001
-0
-0
-1
--0.12
--0.12
--0.122
-0.44
-0.439
-1
-1
--626.745
-1
-1
-1
-0
--617283944.0617283945
-1
+0.000000000000
+0.000000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.000000000000
+0.000000000000
+1.000000000000
+-0.120000000000
+-0.120000000000
+-0.122000000000
+0.440000000000
+0.439000000000
+1.000000000000
+1.000000000000
+-626.745000000000
+1.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
+-617283944.061728394500
+1.000000000000
 PREHOOK: query: -- stddev, var
 EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value
 PREHOOK: type: QUERY
@@ -2498,7 +2498,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890.123456789
+-1234567890.1234567890
 PREHOOK: query: -- max
 EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2558,7 +2558,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1234567890.12345678
+1234567890.1234567800
 PREHOOK: query: -- count
 EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
index 77dc175..5352885 100644
--- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
@@ -113,56 +113,56 @@ LIMIT 50
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_test
 #### A masked pattern was here ####
--1073051226	-7382.0	-4409.2486486486	-5280.969230769231	-4409.2486486486
--1072081801	8373.0	5001.1702702703	5989.915384615385	5001.1702702703
--1072076362	-5470.0	-3267.2162162162	-3913.1538461538466	-3267.2162162162
--1070883071	-741.0	-442.5972972973	-530.1	-442.5972972973
--1070551679	-947.0	-565.6405405405	-677.4692307692308	-565.6405405405
--1069512165	11417.0	6819.3432432432	8167.546153846154	6819.3432432432
--1069109166	8390.0	5011.3243243243	6002.076923076923	5011.3243243243
--1068623584	-14005.0	-8365.1486486486	-10018.961538461539	-8365.1486486486
--1067386090	-3977.0	-2375.4513513514	-2845.084615384616	-2375.4513513514
--1066922682	-9987.0	-5965.2081081081	-7144.546153846154	-5965.2081081081
--1066226047	-9439.0	-5637.8891891892	-6752.515384615385	-5637.8891891892
--1065117869	2538.0	1515.9405405405	1815.646153846154	1515.9405405405
--1064949302	6454.0	3854.9567567568	4617.092307692308	3854.9567567568
--1063498122	-11480.0	-6856.972972973	-8212.615384615387	-6856.972972973
--1062973443	10541.0	6296.1108108108	7540.869230769231	6296.1108108108
--1061614989	-4234.0	-2528.9567567568	-3028.938461538462	-2528.9567567568
--1061057428	-1085.0	-648.0675675676	-776.1923076923077	-648.0675675676
--1059941909	8782.0	5245.4648648649	6282.507692307693	5245.4648648649
--1059338191	7322.0	4373.4108108108	5238.046153846154	4373.4108108108
--1059047258	12452.0	7437.5459459459	8907.969230769231	7437.5459459459
--1056684111	13991.0	8356.7864864865	10008.946153846155	8356.7864864865
--1055945837	13690.0	8177	9793.615384615387	8177
--1055669248	2570.0	1535.0540540541	1838.538461538462	1535.0540540541
--1055316250	-14990.0	-8953.4864864865	-10723.615384615385	-8953.4864864865
--1053385587	14504.0	8663.2	10375.938461538462	8663.2
--1053238077	-3704.0	-2212.3891891892	-2649.784615384616	-2212.3891891892
--1052745800	-12404.0	-7408.8756756757	-8873.630769230771	-7408.8756756757
--1052322972	-7433.0	-4439.7108108108	-5317.453846153847	-4439.7108108108
--1050684541	-8261.0	-4934.272972973	-5909.792307692308	-4934.272972973
--1050657303	-6999.0	-4180.4837837838	-5006.976923076923	-4180.4837837838
--1050165799	8634.0	5157.0648648649	6176.63076923077	5157.0648648649
+-1073051226	-7382.0	-4409.2486486486	-5280.96923076923100	-4409.2486486486
+-1072081801	8373.0	5001.1702702703	5989.91538461538500	5001.1702702703
+-1072076362	-5470.0	-3267.2162162162	-3913.15384615384660	-3267.2162162162
+-1070883071	-741.0	-442.5972972973	-530.10000000000000	-442.5972972973
+-1070551679	-947.0	-565.6405405405	-677.46923076923080	-565.6405405405
+-1069512165	11417.0	6819.3432432432	8167.54615384615400	6819.3432432432
+-1069109166	8390.0	5011.3243243243	6002.07692307692300	5011.3243243243
+-1068623584	-14005.0	-8365.1486486486	-10018.96153846153900	-8365.1486486486
+-1067386090	-3977.0	-2375.4513513514	-2845.08461538461600	-2375.4513513514
+-1066922682	-9987.0	-5965.2081081081	-7144.54615384615400	-5965.2081081081
+-1066226047	-9439.0	-5637.8891891892	-6752.51538461538500	-5637.8891891892
+-1065117869	2538.0	1515.9405405405	1815.64615384615400	1515.9405405405
+-1064949302	6454.0	3854.9567567568	4617.09230769230800	3854.9567567568
+-1063498122	-11480.0	-6856.9729729730	-8212.61538461538700	-6856.9729729730
+-1062973443	10541.0	6296.1108108108	7540.86923076923100	6296.1108108108
+-1061614989	-4234.0	-2528.9567567568	-3028.93846153846200	-2528.9567567568
+-1061057428	-1085.0	-648.0675675676	-776.19230769230770	-648.0675675676
+-1059941909	8782.0	5245.4648648649	6282.50769230769300	5245.4648648649
+-1059338191	7322.0	4373.4108108108	5238.04615384615400	4373.4108108108
+-1059047258	12452.0	7437.5459459459	8907.96923076923100	7437.5459459459
+-1056684111	13991.0	8356.7864864865	10008.94615384615500	8356.7864864865
+-1055945837	13690.0	8177.0000000000	9793.61538461538700	8177.0000000000
+-1055669248	2570.0	1535.0540540541	1838.53846153846200	1535.0540540541
+-1055316250	-14990.0	-8953.4864864865	-10723.61538461538500	-8953.4864864865
+-1053385587	14504.0	8663.2000000000	10375.93846153846200	8663.2000000000
+-1053238077	-3704.0	-2212.3891891892	-2649.78461538461600	-2212.3891891892
+-1052745800	-12404.0	-7408.8756756757	-8873.63076923077100	-7408.8756756757
+-1052322972	-7433.0	-4439.7108108108	-5317.45384615384700	-4439.7108108108
+-1050684541	-8261.0	-4934.2729729730	-5909.79230769230800	-4934.2729729730
+-1050657303	-6999.0	-4180.4837837838	-5006.97692307692300	-4180.4837837838
+-1050165799	8634.0	5157.0648648649	6176.63076923077000	5157.0648648649
 -1048934049	-524.0	-312.9837837838	-374.86153846153854	-312.9837837838
--1046399794	4130.0	2466.8378378378	2954.5384615384614	2466.8378378378
--1045867222	-8034.0	-4798.6864864865	-5747.400000000001	-4798.6864864865
--1045196363	-5039.0	-3009.7810810811	-3604.823076923077	-3009.7810810811
--1045181724	-5706.0	-3408.1783783784	-4081.9846153846156	-3408.1783783784
--1045087657	-5865.0	-3503.1486486486	-4195.7307692307695	-3503.1486486486
--1044207190	5381.0	3214.0567567568	3849.4846153846156	3214.0567567568
--1044093617	-3422.0	-2043.9513513514	-2448.046153846154	-2043.9513513514
--1043573508	16216.0	9685.772972973	11600.676923076924	9685.772972973
--1043132597	12302.0	7347.9513513514	8800.66153846154	7347.9513513514
--1043082182	9180.0	5483.1891891892	6567.2307692307695	5483.1891891892
--1042805968	5133.0	3065.927027027	3672.0692307692307	3065.927027027
--1042712895	9296.0	5552.4756756757	6650.215384615385	5552.4756756757
--1042396242	9583.0	5723.9	6855.53076923077	5723.9
--1041734429	-836.0	-499.3405405405	-598.0615384615385	-499.3405405405
--1041391389	-12970.0	-7746.9459459459	-9278.538461538463	-7746.9459459459
--1041252354	756.0	451.5567567568	540.8307692307692	451.5567567568
--1039776293	13704.0	8185.3621621622	9803.630769230771	8185.3621621622
--1039762548	-3802.0	-2270.9243243243	-2719.8923076923083	-2270.9243243243
+-1046399794	4130.0	2466.8378378378	2954.53846153846140	2466.8378378378
+-1045867222	-8034.0	-4798.6864864865	-5747.40000000000100	-4798.6864864865
+-1045196363	-5039.0	-3009.7810810811	-3604.82307692307700	-3009.7810810811
+-1045181724	-5706.0	-3408.1783783784	-4081.98461538461560	-3408.1783783784
+-1045087657	-5865.0	-3503.1486486486	-4195.73076923076950	-3503.1486486486
+-1044207190	5381.0	3214.0567567568	3849.48461538461560	3214.0567567568
+-1044093617	-3422.0	-2043.9513513514	-2448.04615384615400	-2043.9513513514
+-1043573508	16216.0	9685.7729729730	11600.67692307692400	9685.7729729730
+-1043132597	12302.0	7347.9513513514	8800.66153846154000	7347.9513513514
+-1043082182	9180.0	5483.1891891892	6567.23076923076950	5483.1891891892
+-1042805968	5133.0	3065.9270270270	3672.06923076923070	3065.9270270270
+-1042712895	9296.0	5552.4756756757	6650.21538461538500	5552.4756756757
+-1042396242	9583.0	5723.9000000000	6855.53076923077000	5723.9000000000
+-1041734429	-836.0	-499.3405405405	-598.06153846153850	-499.3405405405
+-1041391389	-12970.0	-7746.9459459459	-9278.53846153846300	-7746.9459459459
+-1041252354	756.0	451.5567567568	540.83076923076920	451.5567567568
+-1039776293	13704.0	8185.3621621622	9803.63076923077100	8185.3621621622
+-1039762548	-3802.0	-2270.9243243243	-2719.89230769230830	-2270.9243243243
 PREHOOK: query: SELECT sum(hash(*))
   FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
         WHERE cdecimal1 is not null and cdecimal2 is not null

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/windowing_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_decimal.q.out b/ql/src/test/results/clientpositive/windowing_decimal.q.out
index 60563ba..b157a23 100644
--- a/ql/src/test/results/clientpositive/windowing_decimal.q.out
+++ b/ql/src/test/results/clientpositive/windowing_decimal.q.out
@@ -57,32 +57,32 @@ from part_dec
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@part_dec
 #### A masked pattern was here ####
-Manufacturer#1	1173.15	1173.15	2346.3
-Manufacturer#1	1173.15	1173.15	2346.3
-Manufacturer#1	1414.42	1173.15	3760.72
-Manufacturer#1	1602.59	1173.15	5363.31
-Manufacturer#1	1632.66	1173.15	6995.97
-Manufacturer#1	1753.76	1173.15	8749.73
-Manufacturer#2	1690.68	1690.68	1690.68
-Manufacturer#2	1698.66	1690.68	3389.34
-Manufacturer#2	1701.6	1690.68	5090.94
-Manufacturer#2	1800.7	1690.68	6891.64
-Manufacturer#2	2031.98	1690.68	8923.62
-Manufacturer#3	1190.27	1190.27	1190.27
-Manufacturer#3	1337.29	1190.27	2527.56
-Manufacturer#3	1410.39	1190.27	3937.95
-Manufacturer#3	1671.68	1190.27	5609.63
-Manufacturer#3	1922.98	1190.27	7532.61
-Manufacturer#4	1206.26	1206.26	1206.26
-Manufacturer#4	1290.35	1206.26	2496.61
-Manufacturer#4	1375.42	1206.26	3872.03
-Manufacturer#4	1620.67	1206.26	5492.7
-Manufacturer#4	1844.92	1206.26	7337.62
-Manufacturer#5	1018.1	1018.1	1018.1
-Manufacturer#5	1464.48	1018.1	2482.58
-Manufacturer#5	1611.66	1018.1	4094.24
-Manufacturer#5	1788.73	1018.1	5882.97
-Manufacturer#5	1789.69	1018.1	7672.66
+Manufacturer#1	1173.150	1173.150	2346.300
+Manufacturer#1	1173.150	1173.150	2346.300
+Manufacturer#1	1414.420	1173.150	3760.720
+Manufacturer#1	1602.590	1173.150	5363.310
+Manufacturer#1	1632.660	1173.150	6995.970
+Manufacturer#1	1753.760	1173.150	8749.730
+Manufacturer#2	1690.680	1690.680	1690.680
+Manufacturer#2	1698.660	1690.680	3389.340
+Manufacturer#2	1701.600	1690.680	5090.940
+Manufacturer#2	1800.700	1690.680	6891.640
+Manufacturer#2	2031.980	1690.680	8923.620
+Manufacturer#3	1190.270	1190.270	1190.270
+Manufacturer#3	1337.290	1190.270	2527.560
+Manufacturer#3	1410.390	1190.270	3937.950
+Manufacturer#3	1671.680	1190.270	5609.630
+Manufacturer#3	1922.980	1190.270	7532.610
+Manufacturer#4	1206.260	1206.260	1206.260
+Manufacturer#4	1290.350	1206.260	2496.610
+Manufacturer#4	1375.420	1206.260	3872.030
+Manufacturer#4	1620.670	1206.260	5492.700
+Manufacturer#4	1844.920	1206.260	7337.620
+Manufacturer#5	1018.100	1018.100	1018.100
+Manufacturer#5	1464.480	1018.100	2482.580
+Manufacturer#5	1611.660	1018.100	4094.240
+Manufacturer#5	1788.730	1018.100	5882.970
+Manufacturer#5	1789.690	1018.100	7672.660
 PREHOOK: query: select p_mfgr, p_retailprice, 
 first_value(p_retailprice) over(partition by p_mfgr order by p_retailprice range between 5 preceding and current row) ,
 sum(p_retailprice) over(partition by p_mfgr order by p_retailprice range between 5 preceding and current row)
@@ -97,29 +97,29 @@ from part_dec
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@part_dec
 #### A masked pattern was here ####
-Manufacturer#1	1173.15	1173.15	2346.3
-Manufacturer#1	1173.15	1173.15	2346.3
-Manufacturer#1	1414.42	1414.42	1414.42
-Manufacturer#1	1602.59	1602.59	1602.59
-Manufacturer#1	1632.66	1632.66	1632.66
-Manufacturer#1	1753.76	1753.76	1753.76
-Manufacturer#2	1690.68	1690.68	1690.68
-Manufacturer#2	1698.66	1698.66	1698.66
-Manufacturer#2	1701.6	1698.66	3400.26
-Manufacturer#2	1800.7	1800.7	1800.7
-Manufacturer#2	2031.98	2031.98	2031.98
-Manufacturer#3	1190.27	1190.27	1190.27
-Manufacturer#3	1337.29	1337.29	1337.29
-Manufacturer#3	1410.39	1410.39	1410.39
-Manufacturer#3	1671.68	1671.68	1671.68
-Manufacturer#3	1922.98	1922.98	1922.98
-Manufacturer#4	1206.26	1206.26	1206.26
-Manufacturer#4	1290.35	1290.35	1290.35
-Manufacturer#4	1375.42	1375.42	1375.42
-Manufacturer#4	1620.67	1620.67	1620.67
-Manufacturer#4	1844.92	1844.92	1844.92
-Manufacturer#5	1018.1	1018.1	1018.1
-Manufacturer#5	1464.48	1464.48	1464.48
-Manufacturer#5	1611.66	1611.66	1611.66
-Manufacturer#5	1788.73	1788.73	1788.73
-Manufacturer#5	1789.69	1788.73	3578.42
+Manufacturer#1	1173.150	1173.150	2346.300
+Manufacturer#1	1173.150	1173.150	2346.300
+Manufacturer#1	1414.420	1414.420	1414.420
+Manufacturer#1	1602.590	1602.590	1602.590
+Manufacturer#1	1632.660	1632.660	1632.660
+Manufacturer#1	1753.760	1753.760	1753.760
+Manufacturer#2	1690.680	1690.680	1690.680
+Manufacturer#2	1698.660	1698.660	1698.660
+Manufacturer#2	1701.600	1698.660	3400.260
+Manufacturer#2	1800.700	1800.700	1800.700
+Manufacturer#2	2031.980	2031.980	2031.980
+Manufacturer#3	1190.270	1190.270	1190.270
+Manufacturer#3	1337.290	1337.290	1337.290
+Manufacturer#3	1410.390	1410.390	1410.390
+Manufacturer#3	1671.680	1671.680	1671.680
+Manufacturer#3	1922.980	1922.980	1922.980
+Manufacturer#4	1206.260	1206.260	1206.260
+Manufacturer#4	1290.350	1290.350	1290.350
+Manufacturer#4	1375.420	1375.420	1375.420
+Manufacturer#4	1620.670	1620.670	1620.670
+Manufacturer#4	1844.920	1844.920	1844.920
+Manufacturer#5	1018.100	1018.100	1018.100
+Manufacturer#5	1464.480	1464.480	1464.480
+Manufacturer#5	1611.660	1611.660	1611.660
+Manufacturer#5	1788.730	1788.730	1788.730
+Manufacturer#5	1789.690	1788.730	3578.420

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/windowing_navfn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_navfn.q.out b/ql/src/test/results/clientpositive/windowing_navfn.q.out
index ae7d95a..a79fccc 100644
--- a/ql/src/test/results/clientpositive/windowing_navfn.q.out
+++ b/ql/src/test/results/clientpositive/windowing_navfn.q.out
@@ -287,13 +287,13 @@ POSTHOOK: Input: default@over10k
 65536	98.42
 65536	0.93
 65536	83.48
-65536	75.7
+65536	75.70
 65536	88.04
 65536	94.09
 65536	33.45
 65536	44.41
 65536	22.15
-65536	20.5
+65536	20.50
 65536	58.86
 65536	30.91
 65536	74.47
@@ -310,9 +310,9 @@ POSTHOOK: Input: default@over10k
 65536	80.26
 65536	35.07
 65536	95.88
-65536	30.6
+65536	30.60
 65536	46.97
-65536	58.8
+65536	58.80
 65536	5.72
 65536	29.27
 65536	62.25
@@ -336,7 +336,7 @@ POSTHOOK: Input: default@over10k
 65537	35.86
 65537	47.75
 65537	1.12
-65537	52.9
+65537	52.90
 65537	53.92
 65537	43.45
 65537	7.52
@@ -350,20 +350,20 @@ POSTHOOK: Input: default@over10k
 65537	56.48
 65537	83.21
 65537	56.52
-65537	36.6
-65537	59.7
+65537	36.60
+65537	59.70
 65537	80.14
-65537	66.3
+65537	66.30
 65537	94.87
 65537	40.92
-65537	25.2
+65537	25.20
 65537	7.36
 65538	NULL
 65538	53.35
 65538	54.64
 65538	76.67
 65538	15.17
-65538	1.2
+65538	1.20
 65538	13.71
 65538	81.59
 65538	43.33

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/windowing_rank.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_rank.q.out b/ql/src/test/results/clientpositive/windowing_rank.q.out
index 6a74a8e..67975f3 100644
--- a/ql/src/test/results/clientpositive/windowing_rank.q.out
+++ b/ql/src/test/results/clientpositive/windowing_rank.q.out
@@ -508,16 +508,16 @@ where rnk =  1 limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over10k
 #### A masked pattern was here ####
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
 PREHOOK: query: select ts, dec, rnk
 from
   (select ts, dec,
@@ -546,16 +546,16 @@ where dec = 89.5 limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over10k
 #### A masked pattern was here ####
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
 PREHOOK: query: select ts, dec, rnk
 from
   (select ts, dec,
@@ -586,13 +586,13 @@ where rnk = 1 limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over10k
 #### A masked pattern was here ####
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_windowspec3.q.out b/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
index aeb5adc..e311cf9 100644
--- a/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
+++ b/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
@@ -215,18 +215,18 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@emp
 #### A masked pattern was here ####
 10	7839	NULL	5000.0	5000.0	5000.0	NULL	NULL	1687.5	5000.0
-10	7782	50	2450.0	2450.0	1687.5	NULL	1500.0	NULL	2350.0
-10	7934	100	1300.0	1875.0	1687.5	NULL	NULL	NULL	2350.0
-10	7987	150.5	1500.0	1750.0	1687.5	NULL	NULL	NULL	2350.0
-10	7988	200	1500.0	1687.5	1687.5	2450.0	NULL	NULL	2350.0
+10	7782	50.00	2450.0	2450.0	1687.5	NULL	1500.0	NULL	2350.0
+10	7934	100.00	1300.0	1875.0	1687.5	NULL	NULL	NULL	2350.0
+10	7987	150.50	1500.0	1750.0	1687.5	NULL	NULL	NULL	2350.0
+10	7988	200.00	1500.0	1687.5	1687.5	2450.0	NULL	NULL	2350.0
 20	7788	NULL	3000.0	1975.0	1975.0	NULL	NULL	2975.0	1975.0
 20	7902	NULL	3000.0	1975.0	1975.0	NULL	NULL	2975.0	1975.0
 20	7876	NULL	1100.0	1975.0	1975.0	NULL	NULL	2975.0	1975.0
 20	7369	NULL	800.0	1975.0	1975.0	NULL	NULL	2975.0	1975.0
-20	7566	100	2975.0	2975.0	2975.0	NULL	NULL	NULL	2175.0
+20	7566	100.00	2975.0	2975.0	2975.0	NULL	NULL	NULL	2175.0
 30	7900	NULL	950.0	1900.0	1900.0	NULL	NULL	1400.0	1900.0
 30	7698	NULL	2850.0	1900.0	1900.0	NULL	NULL	1400.0	1900.0
-30	7499	200.5	1600.0	1600.0	1450.0	NULL	NULL	1250.0	1630.0
-30	7844	300	1500.0	1550.0	1400.0	NULL	1250.0	NULL	1566.6666666666667
-30	7521	300.5	1250.0	1450.0	1400.0	NULL	1250.0	NULL	1566.6666666666667
-30	7654	500	1250.0	1333.3333333333333	1333.3333333333333	1375.0	NULL	NULL	1566.6666666666667
+30	7499	200.50	1600.0	1600.0	1450.0	NULL	NULL	1250.0	1630.0
+30	7844	300.00	1500.0	1550.0	1400.0	NULL	1250.0	NULL	1566.6666666666667
+30	7521	300.50	1250.0	1450.0	1400.0	NULL	1250.0	NULL	1566.6666666666667
+30	7654	500.00	1250.0	1333.3333333333333	1333.3333333333333	1375.0	NULL	NULL	1566.6666666666667

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java
index 709e53f..9ea6e91 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java
@@ -366,7 +366,7 @@ public final class BinarySortableSerializeWrite implements SerializeWrite {
    * DECIMAL.
    */
   @Override
-  public void writeHiveDecimal(HiveDecimal dec) throws IOException {
+  public void writeHiveDecimal(HiveDecimal dec, int scale) throws IOException {
     final boolean invert = columnSortOrderIsDesc[++index];
 
     // This field is not a null.

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java b/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java
index e6fb8b6..21daa8b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java
@@ -151,5 +151,5 @@ public interface SerializeWrite {
   /*
    * DECIMAL.
    */
-  void writeHiveDecimal(HiveDecimal dec) throws IOException;
+  void writeHiveDecimal(HiveDecimal dec, int scale) throws IOException;
 }
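
For readers tracking the interface change above: the extra scale argument exists because HiveDecimal normalizes away trailing zeros, so a serializer cannot recover the column's declared scale from the value alone. A minimal sketch of why (not part of the patch; the demo class name is ours, and it assumes Hive's common jars on the classpath plus the toFormatString helper added later in this patch):

import org.apache.hadoop.hive.common.type.HiveDecimal;

// Illustrative only: shows why writeHiveDecimal() now needs the column scale.
public class DecimalScaleDemo {
  public static void main(String[] args) {
    HiveDecimal dec = HiveDecimal.create("75.7");
    // The value itself carries no padding information...
    System.out.println(dec.toString());        // prints 75.7
    // ...so the writer must be told the target scale, e.g. decimal(10,2):
    System.out.println(dec.toFormatString(2)); // prints 75.70
  }
}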

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java
index 40601c0..4e82e9b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java
@@ -102,12 +102,12 @@ public class LazyHiveDecimal extends LazyPrimitive<LazyHiveDecimalObjectInspecto
    * @param hiveDecimal
    * @throws IOException
    */
-  public static void writeUTF8(OutputStream outputStream, HiveDecimal hiveDecimal)
+  public static void writeUTF8(OutputStream outputStream, HiveDecimal hiveDecimal, int scale)
     throws IOException {
     if (hiveDecimal == null) {
       outputStream.write(nullBytes);
     } else {
-      ByteBuffer b = Text.encode(hiveDecimal.toString());
+      ByteBuffer b = Text.encode(hiveDecimal.toFormatString(scale));
       outputStream.write(b.array(), 0, b.limit());
     }
   }
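
A usage sketch of the patched method (demo class name is ours; assumes a serde jar with this patch applied). The scale now flows into the UTF-8 text, which is what produces the padded values in the .q.out diffs above:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.serde2.lazy.LazyHiveDecimal;

public class WriteUTF8Demo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // For a column with scale 3, 1018.1 is now rendered with three digits:
    LazyHiveDecimal.writeUTF8(out, HiveDecimal.create("1018.1"), 3);
    System.out.println(out.toString("UTF-8")); // prints 1018.100
  }
}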

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
index d6b2219..29d6ad8 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
@@ -304,8 +304,9 @@ public final class LazyUtils {
       break;
     }
     case DECIMAL: {
+      HiveDecimalObjectInspector decimalOI = (HiveDecimalObjectInspector) oi;
       LazyHiveDecimal.writeUTF8(out,
-        ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o));
+        decimalOI.getPrimitiveJavaObject(o), decimalOI.scale());
       break;
     }
     default: {

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
index 986d246..b64a803 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.serde2.lazy.fast;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.nio.charset.CharacterCodingException;
 import java.sql.Date;
 import java.sql.Timestamp;
 
@@ -34,7 +33,6 @@ import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
-import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.HiveIntervalDayTimeWritable;
 import org.apache.hadoop.hive.serde2.io.HiveIntervalYearMonthWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
@@ -47,13 +45,6 @@ import org.apache.hadoop.hive.serde2.lazy.LazyLong;
 import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
 import org.apache.hadoop.hive.serde2.lazy.LazyTimestamp;
 import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
-import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyObjectInspectorParameters;
-import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
 import org.apache.hadoop.hive.serde2.fast.SerializeWrite;
 import org.apache.hadoop.io.Text;
 import org.apache.hive.common.util.DateUtils;
@@ -506,13 +497,12 @@ public final class LazySimpleSerializeWrite implements SerializeWrite {
    * DECIMAL.
    */
   @Override
-  public void writeHiveDecimal(HiveDecimal v) throws IOException {
-
+  public void writeHiveDecimal(HiveDecimal v, int scale) throws IOException {
     if (index > 0) {
       output.write(separator);
     }
 
-    LazyHiveDecimal.writeUTF8(output, v);
+    LazyHiveDecimal.writeUTF8(output, v, scale);
 
     index++;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java
index ebe4181..8f81df6 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java
@@ -713,7 +713,7 @@ public class LazyBinarySerializeWrite implements SerializeWrite {
    * DECIMAL.
    */
   @Override
-  public void writeHiveDecimal(HiveDecimal v) throws IOException {
+  public void writeHiveDecimal(HiveDecimal v, int scale) throws IOException {
 
     // Every 8 fields we write a NULL byte.
     if ((fieldIndex % 8) == 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java b/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java
index fa46c9e..fc845a5 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java
@@ -259,12 +259,12 @@ public class VerifyFast {
     }
   }
 
-  public static void serializeWrite(SerializeWrite serializeWrite, PrimitiveCategory primitiveCategory, Object object) throws IOException {
+  public static void serializeWrite(SerializeWrite serializeWrite, PrimitiveTypeInfo primitiveTypeInfo, Object object) throws IOException {
     if (object == null) {
       serializeWrite.writeNull();
       return;
     }
-    switch (primitiveCategory) {
+    switch (primitiveTypeInfo.getPrimitiveCategory()) {
       case BOOLEAN:
       {
         boolean value = (Boolean) object;
@@ -330,7 +330,8 @@ public class VerifyFast {
     case DECIMAL:
       {
         HiveDecimal value = (HiveDecimal) object;
-        serializeWrite.writeHiveDecimal(value);
+        DecimalTypeInfo decTypeInfo = (DecimalTypeInfo)primitiveTypeInfo;
+        serializeWrite.writeHiveDecimal(value, decTypeInfo.scale());
       }
       break;
     case DATE:
@@ -365,7 +366,7 @@ public class VerifyFast {
       }
       break;
     default:
-      throw new Error("Unknown primitive category " + primitiveCategory.name());
+      throw new Error("Unknown primitive category " + primitiveTypeInfo.getPrimitiveCategory().name());
     }
   }
 }
\ No newline at end of file
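
The helper's switch from PrimitiveCategory to PrimitiveTypeInfo is what makes the scale reachable: only the type info records precision and scale, while the bare category does not. A standalone sketch (demo class name is ours):

import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ScaleFromTypeInfo {
  public static void main(String[] args) {
    // decimal(10,2): the category alone ("DECIMAL") loses these parameters.
    DecimalTypeInfo decTypeInfo = TypeInfoFactory.getDecimalTypeInfo(10, 2);
    System.out.println(decTypeInfo.precision()); // prints 10
    System.out.println(decTypeInfo.scale());     // prints 2
  }
}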

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java b/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java
index 4438bdc..ae476ae 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java
@@ -62,8 +62,7 @@ public class TestBinarySortableFast extends TestCase {
       int[] perFieldWriteLengths = new int[MyTestPrimitiveClass.primitiveCount];
       for (int index = 0; index < MyTestPrimitiveClass.primitiveCount; index++) {
         Object object = t.getPrimitiveObject(index);
-        PrimitiveCategory primitiveCategory = t.getPrimitiveCategory(index);
-        VerifyFast.serializeWrite(binarySortableSerializeWrite, primitiveCategory, object);
+        VerifyFast.serializeWrite(binarySortableSerializeWrite, primitiveTypeInfoMap.get(t)[index], object);
         perFieldWriteLengths[index] = output.getLength();
       }
       perFieldWriteLengthsArray[i] = perFieldWriteLengths;

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java b/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java
index 951d91a..7ebe7ae 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java
@@ -61,8 +61,7 @@ public class TestLazySimpleFast extends TestCase {
 
       for (int index = 0; index < MyTestPrimitiveClass.primitiveCount; index++) {
         Object object = t.getPrimitiveObject(index);
-        PrimitiveCategory primitiveCategory = t.getPrimitiveCategory(index);
-        VerifyFast.serializeWrite(lazySimpleSerializeWrite, primitiveCategory, object);
+        VerifyFast.serializeWrite(lazySimpleSerializeWrite, primitiveTypeInfosArray[i][index], object);
       }
 
       BytesWritable bytesWritable = new BytesWritable();

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java b/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java
index a169586..4032743 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java
@@ -60,8 +60,7 @@ public class TestLazyBinaryFast extends TestCase {
 
       for (int index = 0; index < MyTestPrimitiveClass.primitiveCount; index++) {
         Object object = t.getPrimitiveObject(index);
-        PrimitiveCategory primitiveCategory = t.getPrimitiveCategory(index);
-        VerifyFast.serializeWrite(lazyBinarySerializeWrite, primitiveCategory, object);
+        VerifyFast.serializeWrite(lazyBinarySerializeWrite, primitiveTypeInfosArray[i][index], object);
       }
 
       BytesWritable bytesWritable = new BytesWritable();

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
index 4ed17a2..1c6be91 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
@@ -102,6 +102,17 @@ public class HiveDecimal implements Comparable<HiveDecimal> {
   public String toString() {
      return bd.toPlainString();
   }
+  
+  /**
+   * Returns a string representation of the number with exactly the given number of
+   * decimal digits after the decimal point. Note that this differs from toString().
+   * @param scale the number of digits after the decimal point
+   * @return the string representation with exactly {@code scale} decimal digits
+   */
+  public String toFormatString(int scale) {
+    return (bd.scale() == scale ? bd :
+      bd.setScale(scale, RoundingMode.HALF_UP)).toPlainString();
+  }
 
   public HiveDecimal setScale(int i) {
     return new HiveDecimal(bd.setScale(i, RoundingMode.HALF_UP));
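
Note that toFormatString() rounds as well as pads, since it delegates to BigDecimal.setScale with HALF_UP. A small sketch of both behaviors (demo class name is ours):

import org.apache.hadoop.hive.common.type.HiveDecimal;

public class ToFormatStringDemo {
  public static void main(String[] args) {
    // Padding: fewer fractional digits than the scale adds trailing zeros.
    System.out.println(HiveDecimal.create("5723.9").toFormatString(10)); // 5723.9000000000
    // Rounding: more fractional digits than the scale rounds half-up.
    System.out.println(HiveDecimal.create("1.005").toFormatString(2));   // 1.01
  }
}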

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
index a7d31fa..fe8ad85 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
@@ -17,10 +17,8 @@
  */
 
 package org.apache.hadoop.hive.ql.exec.vector;
-import java.io.IOException;
 import java.math.BigInteger;
 
-
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 


[30/55] [abbrv] hive git commit: HIVE-12325 : Turn hive.map.groupby.sorted on by default (Chetna Chaudhari via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-12325 : Turn hive.map.groupby.sorted on by default (Chetna Chaudhari via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/58b85acc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/58b85acc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/58b85acc

Branch: refs/heads/spark
Commit: 58b85acca168fd179a0cd39fb735e21a361cb95d
Parents: 678b77b
Author: Chetna Chaudhari <ch...@gmail.com>
Authored: Thu Nov 5 20:44:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Nov 6 16:04:06 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   6 +-
 .../hive/ql/optimizer/GroupByOptimizer.java     |   8 --
 .../queries/clientpositive/groupby_sort_8.q     |   6 --
 .../clientpositive/groupby_sort_test_1.q        |   1 -
 .../clientpositive/auto_sortmerge_join_10.q.out | 100 +++++++------------
 .../results/clientpositive/bucket_groupby.q.out |  46 +++------
 .../results/clientpositive/groupby_sort_8.q.out |  64 ------------
 .../clientpositive/groupby_sort_test_1.q.out    |  87 ++++++++++------
 .../spark/auto_sortmerge_join_10.q.out          |  45 +++------
 .../tez/auto_sortmerge_join_10.q.out            |  71 ++++++-------
 10 files changed, 155 insertions(+), 279 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 7272ea4..7a8517b 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -862,14 +862,10 @@ public class HiveConf extends Configuration {
     HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true,
         "Whether to optimize multi group by query to generate single M/R  job plan. If the multi group by query has \n" +
         "common group by keys, it will be optimized to generate single M/R job."),
-    HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", false,
+    HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", true,
         "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
         "the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" +
         "is that it limits the number of mappers to the number of files."),
-    HIVE_MAP_GROUPBY_SORT_TESTMODE("hive.map.groupby.sorted.testmode", false,
-        "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
-        "the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan\n" +
-        "is not converted, but a query property is set to denote the same."),
     HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false,
         "Whether to enable using Column Position Alias in Group By or Order By"),
     HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30,
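
Since the default flips to true here, deployments that depended on the old plan shape can still opt out per-session with "set hive.map.groupby.sorted=false;" or programmatically. A minimal sketch (demo class name is ours; ConfVars.HIVE_MAP_GROUPBY_SORT is the enum shown in the hunk above):

import org.apache.hadoop.hive.conf.HiveConf;

public class MapGroupBySortOptOut {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Restore the pre-HIVE-12325 behavior for this configuration object.
    conf.setBoolVar(HiveConf.ConfVars.HIVE_MAP_GROUPBY_SORT, false);
    System.out.println(conf.getBoolVar(HiveConf.ConfVars.HIVE_MAP_GROUPBY_SORT)); // false
  }
}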

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
index f758776..fe459f7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
@@ -212,11 +212,7 @@ public class GroupByOptimizer implements Transform {
         convertGroupByMapSideSortedGroupBy(hiveConf, groupByOp, depth);
       }
       else if (optimizeDistincts && !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) {
-        // In test mode, dont change the query plan. However, setup a query property
         pGraphContext.getQueryProperties().setHasMapGroupBy(true);
-        if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_MAP_GROUPBY_SORT_TESTMODE)) {
-          return;
-        }
         ReduceSinkOperator reduceSinkOp =
             (ReduceSinkOperator)groupByOp.getChildOperators().get(0);
         GroupByDesc childGroupByDesc =
@@ -518,11 +514,7 @@ public class GroupByOptimizer implements Transform {
     // The operators specified by depth and removed from the tree.
     protected void convertGroupByMapSideSortedGroupBy(
         HiveConf conf, GroupByOperator groupByOp, int depth) {
-      // In test mode, dont change the query plan. However, setup a query property
       pGraphContext.getQueryProperties().setHasMapGroupBy(true);
-      if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MAP_GROUPBY_SORT_TESTMODE)) {
-        return;
-      }
 
       if (removeChildren(groupByOp, depth)) {
         // Use bucketized hive input format - that makes sure that one mapper reads the entire file

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/test/queries/clientpositive/groupby_sort_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_8.q b/ql/src/test/queries/clientpositive/groupby_sort_8.q
index f53295e..f0d3a59 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_8.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_8.q
@@ -18,10 +18,4 @@ EXPLAIN
 select count(distinct key) from T1;
 select count(distinct key) from T1;
 
-set hive.map.groupby.sorted.testmode=true;
--- In testmode, the plan is not changed
-EXPLAIN
-select count(distinct key) from T1;
-select count(distinct key) from T1;
-
 DROP TABLE T1;

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_test_1.q b/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
index 4ec138e..70eef33 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
@@ -2,7 +2,6 @@ set hive.enforce.bucketing = true;
 set hive.enforce.sorting = true;
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
-set hive.map.groupby.sorted.testmode=true;
 
 CREATE TABLE T1(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out
index e7f6de3..fb1e656 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out
@@ -242,15 +242,19 @@ select count(*) from
   on subq1.key = subq2.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-6 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-6
-  Stage-0 depends on stages: Stage-3
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        subq1:a 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        subq1:a 
           TableScan
             alias: a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
@@ -259,43 +263,22 @@ STAGE PLANS:
               Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                bucketGroup: true
                 keys: key (type: int)
-                mode: hash
+                mode: final
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int)
-            outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 _col0 (type: int)
+                      1 _col0 (type: int)
 
-  Stage: Stage-6
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        subq2:a 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        subq2:a 
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
           TableScan
             alias: a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
@@ -306,31 +289,22 @@ STAGE PLANS:
                 expressions: key (type: int)
                 outputColumnNames: _col0
                 Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                HashTable Sink Operator
+                Map Join Operator
+                  condition map:
+                       Inner Join 0 to 1
                   keys:
                     0 _col0 (type: int)
                     1 _col0 (type: int)
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Map Join Operator
-              condition map:
-                   Inner Join 0 to 1
-              keys:
-                0 _col0 (type: int)
-                1 _col0 (type: int)
-              Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: bigint)
+                  Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    aggregations: count()
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: bigint)
       Local Work:
         Map Reduce Local Work
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/test/results/clientpositive/bucket_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_groupby.q.out b/ql/src/test/results/clientpositive/bucket_groupby.q.out
index 1b48d3a..1ac5287 100644
--- a/ql/src/test/results/clientpositive/bucket_groupby.q.out
+++ b/ql/src/test/results/clientpositive/bucket_groupby.q.out
@@ -1191,38 +1191,24 @@ STAGE PLANS:
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
-                bucketGroup: true
                 keys: _col0 (type: string), _col1 (type: string)
-                mode: hash
+                mode: final
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 10
-              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col2 (type: bigint)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  Limit
+                    Number of rows: 10
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/test/results/clientpositive/groupby_sort_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_8.q.out b/ql/src/test/results/clientpositive/groupby_sort_8.q.out
index 5152385..5d8f513 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_8.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_8.q.out
@@ -101,70 +101,6 @@ POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t1@ds=1
 #### A masked pattern was here ####
 5
-PREHOOK: query: -- In testmode, the plan is not changed
-EXPLAIN
-select count(distinct key) from T1
-PREHOOK: type: QUERY
-POSTHOOK: query: -- In testmode, the plan is not changed
-EXPLAIN
-select count(distinct key) from T1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1
-            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: key
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count(DISTINCT key)
-                keys: key (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(DISTINCT KEY._col0:0._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(distinct key) from T1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(distinct key) from T1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
-#### A masked pattern was here ####
-5
 PREHOOK: query: DROP TABLE T1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@t1

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out b/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out
index 8c1765d..dfe0ff1 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out
@@ -50,8 +50,13 @@ SELECT key, count(1) FROM T1 GROUP BY key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -67,34 +72,30 @@ STAGE PLANS:
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
-                mode: hash
+                mode: final
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.outputtbl1
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -109,3 +110,33 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl1
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl1
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out
index ee9f448..17d20cb 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out
@@ -206,8 +206,6 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-2
     Spark
-      Edges:
-        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -220,43 +218,28 @@ STAGE PLANS:
                     Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      bucketGroup: true
                       keys: key (type: int)
-                      mode: hash
+                      mode: final
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: bigint)
-        Reducer 2 
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Spark HashTable Sink Operator
+                          keys:
+                            0 _col0 (type: int)
+                            1 _col0 (type: int)
             Local Work:
               Map Reduce Local Work
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int)
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                  Spark HashTable Sink Operator
-                    keys:
-                      0 _col0 (type: int)
-                      1 _col0 (type: int)
 
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 4 <- Map 3 (GROUP, 1)
+        Reducer 3 <- Map 2 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:
-        Map 3 
+        Map 2 
             Map Operator Tree:
                 TableScan
                   alias: a
@@ -275,7 +258,7 @@ STAGE PLANS:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
                         input vertices:
-                          0 Reducer 2
+                          0 Map 1
                         Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
@@ -288,7 +271,7 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Local Work:
               Map Reduce Local Work
-        Reducer 4 
+        Reducer 3 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/58b85acc/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_10.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_10.q.out
index 0d22ea7..98e099c 100644
--- a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_10.q.out
+++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_10.q.out
@@ -245,8 +245,8 @@ STAGE PLANS:
   Stage: Stage-1
     Tez
       Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (BROADCAST_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -259,18 +259,34 @@ STAGE PLANS:
                     Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      bucketGroup: true
                       keys: key (type: int)
-                      mode: hash
+                      mode: final
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: bigint)
-        Map 4 
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Map Join Operator
+                          condition map:
+                               Inner Join 0 to 1
+                          keys:
+                            0 _col0 (type: int)
+                            1 _col0 (type: int)
+                          input vertices:
+                            1 Map 3
+                          Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                          HybridGraceHashJoin: true
+                          Group By Operator
+                            aggregations: count()
+                            mode: hash
+                            outputColumnNames: _col0
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            Reduce Output Operator
+                              sort order: 
+                              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                              value expressions: _col0 (type: bigint)
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: a
@@ -291,37 +307,6 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int)
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                  Map Join Operator
-                    condition map:
-                         Inner Join 0 to 1
-                    keys:
-                      0 _col0 (type: int)
-                      1 _col0 (type: int)
-                    input vertices:
-                      1 Map 4
-                    Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
-                    HybridGraceHashJoin: true
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: bigint)
-        Reducer 3 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE


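Both plan diffs above capture the same rewrite, once for Spark and once for Tez: apparently because the source table is bucketed and sorted on the grouping key, the Group By Operator now runs in `mode: final` inside the mapper instead of `mode: hash` followed by a shuffle into a `mergepartial` reducer, so the dedicated reducer vertex disappears and the remaining vertices are renumbered (Reducer 4/Map 3 become Reducer 3/Map 2, and similarly for Spark). The sketch below illustrates, under that sorted-input assumption, why no hash table or shuffle is needed; the class and method names are illustrative only, not Hive operator code.

    // Illustrative sketch: "final" mode aggregation over key-sorted input.
    // Equal keys arrive adjacent to each other, so a running count can be
    // emitted as soon as the key changes -- no hash table, no shuffle.
    public class SortedCountSketch {
        static void countSorted(int[] sortedKeys) {
            Integer current = null;
            long count = 0;
            for (int key : sortedKeys) {
                if (current != null && current != key) {
                    System.out.println(current + "\t" + count); // finished group
                    count = 0;
                }
                current = key;
                count++;
            }
            if (current != null) {
                System.out.println(current + "\t" + count); // last group
            }
        }

        public static void main(String[] args) {
            countSorted(new int[] {0, 0, 2, 4, 4, 4}); // prints: 0 2, 2 1, 4 3
        }
    }
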
[04/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by xu...@apache.org.
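
The HIVE-12063 diffs below all show the same display change: decimal values are now padded with trailing zeros out to the declared scale of the column, so a value in what is evidently a scale-3 column renders as `0.220` instead of `0.22`. A minimal sketch of that padding rule using `java.math.BigDecimal` (an illustration only; the scale of 3 is read off the `d` column in these tests, and this is not Hive's internal decimal code):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    // Illustrative sketch: pad a decimal value to the column's declared scale.
    public class DecimalPadSketch {
        static String padToScale(String value, int columnScale) {
            // Increasing the scale only appends zeros, so UNNECESSARY never
            // actually rounds; it would throw only if digits had to be dropped.
            return new BigDecimal(value)
                    .setScale(columnScale, RoundingMode.UNNECESSARY)
                    .toPlainString();
        }

        public static void main(String[] args) {
            System.out.println(padToScale("0.22", 3));  // 0.220
            System.out.println(padToScale("11.22", 3)); // 11.220
        }
    }
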
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
index 7c17733..a30820e 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where d=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where d=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where d='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where d='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d!=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,11 +106,11 @@ POSTHOOK: query: select * from newtypestbl where d!=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -119,11 +119,11 @@ POSTHOOK: query: select * from newtypestbl where d!=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -132,11 +132,11 @@ POSTHOOK: query: select * from newtypestbl where d!='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -145,11 +145,11 @@ POSTHOOK: query: select * from newtypestbl where d!='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,11 +158,11 @@ POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -171,11 +171,11 @@ POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<11.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -184,11 +184,11 @@ POSTHOOK: query: select * from newtypestbl where d<11.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<11.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -197,11 +197,11 @@ POSTHOOK: query: select * from newtypestbl where d<11.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<'11.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -210,11 +210,11 @@ POSTHOOK: query: select * from newtypestbl where d<'11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<'11.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -223,11 +223,11 @@ POSTHOOK: query: select * from newtypestbl where d<'11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,11 +236,11 @@ POSTHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -249,11 +249,11 @@ POSTHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -262,11 +262,11 @@ POSTHOOK: query: select * from newtypestbl where d<1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -275,11 +275,11 @@ POSTHOOK: query: select * from newtypestbl where d<1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<=11.22 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -288,16 +288,16 @@ POSTHOOK: query: select * from newtypestbl where d<=11.22 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=11.22 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -306,16 +306,16 @@ POSTHOOK: query: select * from newtypestbl where d<=11.22 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<='11.22' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -324,16 +324,16 @@ POSTHOOK: query: select * from newtypestbl where d<='11.22' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<='11.22' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -342,16 +342,16 @@ POSTHOOK: query: select * from newtypestbl where d<='11.22' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as float) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -360,16 +360,16 @@ POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as float) sort
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as float) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -378,16 +378,16 @@ POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as float) sort
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -396,11 +396,11 @@ POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -409,11 +409,11 @@ POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<=11.22BD sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -422,16 +422,16 @@ POSTHOOK: query: select * from newtypestbl where d<=11.22BD sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=11.22BD sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -440,16 +440,16 @@ POSTHOOK: query: select * from newtypestbl where d<=11.22BD sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=12 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -458,16 +458,16 @@ POSTHOOK: query: select * from newtypestbl where d<=12 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=12 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -476,16 +476,16 @@ POSTHOOK: query: select * from newtypestbl where d<=12 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -494,11 +494,11 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -507,11 +507,11 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d in ('0.22', '11.22') sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -520,16 +520,16 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.22', '11.22') sort by
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d in ('0.22', '11.22') sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -538,16 +538,16 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.22', '11.22') sort by
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d in ('0.9', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -572,11 +572,11 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -585,11 +585,11 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -598,16 +598,16 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -616,16 +616,16 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d between 0 and 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -634,11 +634,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -647,11 +647,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and 1000 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -660,16 +660,16 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and 1000 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d between 0 and 1000 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -678,16 +678,16 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and 1000 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -696,11 +696,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -709,11 +709,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -722,11 +722,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -735,11 +735,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char(10))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -748,11 +748,11 @@ POSTHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char(10))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -761,8 +761,8 @@ POSTHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27

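Worth noting in the diffs above: the predicates keep using unpadded literals (`d=0.22`, `d='0.22'`, `cast('0.22' as float)`) yet still match the rows that now print as `0.220`, i.e. the padding changes only how values are rendered, not the numeric value being compared. `java.math.BigDecimal` draws the same distinction between value and representation, as a rough analogy (not Hive's comparison code):

    import java.math.BigDecimal;

    // Illustrative sketch: trailing zeros change the representation, not the value.
    public class DecimalCompareSketch {
        public static void main(String[] args) {
            BigDecimal unpadded = new BigDecimal("0.22");  // scale 2
            BigDecimal padded   = new BigDecimal("0.220"); // scale 3

            System.out.println(unpadded.compareTo(padded)); // 0     -> numerically equal
            System.out.println(unpadded.equals(padded));    // false -> different scales
        }
    }
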
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out b/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
index e314c10..3b3e5b7 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where cast(ts as string)='2011-01-01
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,11 +106,11 @@ POSTHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -119,11 +119,11 @@ POSTHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -132,11 +132,11 @@ POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -145,11 +145,11 @@ POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,16 +158,16 @@ POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -176,16 +176,16 @@ POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -210,11 +210,11 @@ POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -223,11 +223,11 @@ POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,16 +236,16 @@ POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -254,16 +254,16 @@ POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -272,11 +272,11 @@ POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -285,11 +285,11 @@ POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -298,16 +298,16 @@ POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -316,16 +316,16 @@ POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -350,11 +350,11 @@ POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -363,11 +363,11 @@ POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -376,16 +376,16 @@ POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -394,16 +394,16 @@ POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out b/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
index 2e9f72f..5a62e80 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where v="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where v="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v!="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where v!="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v!="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where v!="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v<"world"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where v<"world"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v<"world"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where v<"world"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v<="world" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,16 +106,16 @@ POSTHOOK: query: select * from newtypestbl where v<="world" sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v<="world" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -124,16 +124,16 @@ POSTHOOK: query: select * from newtypestbl where v<="world" sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v="bee   "
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,11 +158,11 @@ POSTHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -171,11 +171,11 @@ POSTHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v in ("bee", "world") sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -184,16 +184,16 @@ POSTHOOK: query: select * from newtypestbl where v in ("bee", "world") sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v in ("bee", "world") sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -202,16 +202,16 @@ POSTHOOK: query: select * from newtypestbl where v in ("bee", "world") sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v in ("orange")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,11 +236,11 @@ POSTHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -249,11 +249,11 @@ POSTHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v between "bee" and "zombie" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -262,16 +262,16 @@ POSTHOOK: query: select * from newtypestbl where v between "bee" and "zombie" so
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v between "bee" and "zombie" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -280,16 +280,16 @@ POSTHOOK: query: select * from newtypestbl where v between "bee" and "zombie" so
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v between "orange" and "pine"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
index aa3b272..980b65b 100644
--- a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
@@ -241,7 +241,7 @@ POSTHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_pred
@@ -250,7 +250,7 @@ POSTHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/serde_regex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/serde_regex.q.out b/ql/src/test/results/clientpositive/serde_regex.q.out
index ad3af57..7bebb0c 100644
--- a/ql/src/test/results/clientpositive/serde_regex.q.out
+++ b/ql/src/test/results/clientpositive/serde_regex.q.out
@@ -201,43 +201,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@serde_regex1
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: DROP TABLE serde_regex1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@serde_regex1

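All of the expected-output changes in the .q.out diffs above follow the same rule from HIVE-12063: a decimal value is now rendered with exactly as many fraction digits as the declared scale of its column, so 0.22 and 11.22 in a scale-3 column print as 0.220 and 11.220, the predicate-pushdown file's 45.4 becomes 45.40 at scale 2, and serde_regex1's values (scale 18, judging by the output) gain up to eighteen trailing zeros; the sections that follow show the same rule at scales 1, 2, 4, and 10. A minimal sketch of the padding rule using plain java.math.BigDecimal -- an illustration only, not Hive's actual formatting code path:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalPadDemo {
        // Render a value with exactly `scale` fraction digits, mirroring the
        // trailing-zero padding visible in the updated .q.out files above.
        static String padToScale(String value, int scale) {
            return new BigDecimal(value)
                .setScale(scale, RoundingMode.UNNECESSARY) // only appends zeros here
                .toPlainString();
        }

        public static void main(String[] args) {
            System.out.println(padToScale("11.22", 3));              // 11.220
            System.out.println(padToScale("-1234567890.123456789", 18));
            // -1234567890.123456789000000000
        }
    }
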
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out b/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
index 318be3d..cebc342 100644
--- a/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
+++ b/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
@@ -92,9 +92,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19
-Beck	0
-Beck	79.9
+Tom	19.00
+Beck	0.00
+Beck	79.90
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1
@@ -143,10 +143,10 @@ POSTHOOK: Input: default@avro_dec1
 77.3
 55.7
 4.3
-6
+6.0
 12.3
 33.3
-19
+19.0
 3.2
 79.9
 PREHOOK: query: DROP TABLE dec

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out b/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out
index b2704c6..46fbeb7 100644
--- a/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out
@@ -26,9 +26,9 @@ POSTHOOK: query: select * from decimal_1_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_1_1
 #### A masked pattern was here ####
-0
-0
-0
+0.0
+0.0
+0.0
 0.1
 0.2
 0.9
@@ -37,13 +37,13 @@ NULL
 0.3
 NULL
 NULL
-0
-0
+0.0
+0.0
 NULL
-0
-0
-0
-0
+0.0
+0.0
+0.0
+0.0
 -0.1
 -0.2
 -0.9
@@ -52,10 +52,10 @@ NULL
 -0.3
 NULL
 NULL
-0
-0
+0.0
+0.0
 NULL
-0
+0.0
 PREHOOK: query: select d from decimal_1_1 order by d desc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_1_1
@@ -69,18 +69,18 @@ POSTHOOK: Input: default@decimal_1_1
 0.3
 0.2
 0.1
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
 -0.1
 -0.2
 -0.3

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
index 20b188b..bc785f9 100644
--- a/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
@@ -172,112 +172,112 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -288,109 +288,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
index 133769a..f1ff784 100644
--- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
@@ -650,34 +650,34 @@ POSTHOOK: Input: default@decimal_date_test
 -18.5162162162
 -17.3216216216
 -16.7243243243
--16.127027027
+-16.1270270270
 -15.5297297297
 -10.7513513514
 -9.5567567568
 -8.3621621622
--5.972972973
+-5.9729729730
 -3.5837837838
 4.1810810811
 4.7783783784
 4.7783783784
 5.3756756757
-5.972972973
-5.972972973
+5.9729729730
+5.9729729730
 11.3486486486
 11.3486486486
 11.9459459459
 14.9324324324
 19.1135135135
 20.3081081081
-22.1
+22.1000000000
 24.4891891892
 33.4486486486
 34.6432432432
 40.0189189189
 42.4081081081
 43.0054054054
-44.2
-44.2
+44.2000000000
+44.2000000000
 44.7972972973
 45.9918918919
 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
index e1acab1..c3e7779 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
@@ -204,13 +204,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
index f6b2920..bcabc98 100644
--- a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
@@ -159,7 +159,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education
@@ -252,7 +252,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education


[17/55] [abbrv] hive git commit: HIVE-12156: expanding view doesn't quote reserved keyword (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)

Posted by xu...@apache.org.
HIVE-12156: expanding view doesn't quote reserved keyword (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a46729b3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a46729b3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a46729b3

Branch: refs/heads/spark
Commit: a46729b3e3d2e902c46e6b0a553b6f22f529f5df
Parents: 0918ff9
Author: Pengcheng Xiong <px...@apache.org>
Authored: Thu Nov 5 11:26:05 2015 -0800
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Thu Nov 5 11:26:05 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  37 ++++++
 .../queries/clientpositive/struct_in_view.q     |  28 +++++
 .../results/clientpositive/struct_in_view.q.out | 118 +++++++++++++++++++
 3 files changed, 183 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a46729b3/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d2c3a7c..f3d7057 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -26,6 +26,8 @@ import java.io.Serializable;
 import java.security.AccessControlException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -155,6 +157,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
+import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
@@ -10496,8 +10499,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       return nodeOutputs;
     }
 
+    Map<ExprNodeDesc,String> nodeToText = new HashMap<>();
+    List<Entry<ASTNode, ExprNodeDesc>> fieldDescList = new ArrayList<>();
+
     for (Map.Entry<ASTNode, ExprNodeDesc> entry : nodeOutputs.entrySet()) {
       if (!(entry.getValue() instanceof ExprNodeColumnDesc)) {
+        // we need to translate the ExprNodeFieldDesc too, e.g., identifiers in
+        // struct<>.
+        if (entry.getValue() instanceof ExprNodeFieldDesc) {
+          fieldDescList.add(entry);
+        }
         continue;
       }
       ASTNode node = entry.getKey();
@@ -10513,9 +10524,35 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
       replacementText.append(".");
       replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
+      nodeToText.put(columnDesc, replacementText.toString());
       unparseTranslator.addTranslation(node, replacementText.toString());
     }
 
+    if (fieldDescList.size() != 0) {
+      // Sorting the list based on the length of fieldName
+      // For example, in Column[a].b.c and Column[a].b, Column[a].b should be
+      // unparsed before Column[a].b.c
+      Collections.sort(fieldDescList, new Comparator<Map.Entry<ASTNode, ExprNodeDesc>>() {
+        public int compare(Entry<ASTNode, ExprNodeDesc> o1, Entry<ASTNode, ExprNodeDesc> o2) {
+          ExprNodeFieldDesc fieldDescO1 = (ExprNodeFieldDesc) o1.getValue();
+          ExprNodeFieldDesc fieldDescO2 = (ExprNodeFieldDesc) o2.getValue();
+          return fieldDescO1.toString().length() < fieldDescO2.toString().length() ? -1 : 1;
+        }
+      });
+      for (Map.Entry<ASTNode, ExprNodeDesc> entry : fieldDescList) {
+        ASTNode node = entry.getKey();
+        ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) entry.getValue();
+        ExprNodeDesc exprNodeDesc = fieldDesc.getDesc();
+        String fieldName = fieldDesc.getFieldName();
+        StringBuilder replacementText = new StringBuilder();
+        replacementText.append(nodeToText.get(exprNodeDesc));
+        replacementText.append(".");
+        replacementText.append(HiveUtils.unparseIdentifier(fieldName, conf));
+        nodeToText.put(fieldDesc, replacementText.toString());
+        unparseTranslator.addTranslation(node, replacementText.toString());
+      }
+    }
+
     return nodeOutputs;
   }
 

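The sort added above exists so that a parent field expression such as Column[a].b is unparsed before its child Column[a].b.c: each ExprNodeFieldDesc builds its replacement text by looking up its parent's already-recorded text in nodeToText and appending one more quoted identifier. Note that the committed comparator never returns 0 for equal-length strings, which technically violates the Comparator contract; a contract-safe sketch of the same shortest-first ordering idea (stand-alone strings here, not the Hive classes):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class UnparseOrderDemo {
        public static void main(String[] args) {
            // A parent expression is a prefix of its children, so sorting
            // shorter strings first guarantees every parent is rewritten
            // before any expression built on top of it.
            List<String> exprs = new ArrayList<>(Arrays.asList(
                "Column[a].b.c", "Column[a].b", "Column[a].b.c.d"));
            exprs.sort(Comparator.comparingInt(String::length)); // returns 0 on ties
            System.out.println(exprs);
            // [Column[a].b, Column[a].b.c, Column[a].b.c.d]
        }
    }
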
http://git-wip-us.apache.org/repos/asf/hive/blob/a46729b3/ql/src/test/queries/clientpositive/struct_in_view.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/struct_in_view.q b/ql/src/test/queries/clientpositive/struct_in_view.q
new file mode 100644
index 0000000..d420030
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/struct_in_view.q
@@ -0,0 +1,28 @@
+drop table testreserved;
+
+create table testreserved (data struct<`end`:string, id: string>);
+
+create view testreservedview as select data.`end` as data_end, data.id as data_id from testreserved;
+
+describe extended testreservedview;
+
+select data.`end` from testreserved;
+
+drop view testreservedview;
+
+drop table testreserved;
+
+create table s (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>);
+
+create view vs1 as select default.src.`end`.key from s;
+
+describe extended vs1;
+
+create view vs2 as select default.src.`end` from s;
+
+describe extended vs2;
+
+drop view vs1;
+
+drop view vs2;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/a46729b3/ql/src/test/results/clientpositive/struct_in_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/struct_in_view.q.out b/ql/src/test/results/clientpositive/struct_in_view.q.out
new file mode 100644
index 0000000..10b2f2e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/struct_in_view.q.out
@@ -0,0 +1,118 @@
+PREHOOK: query: drop table testreserved
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table testreserved
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table testreserved (data struct<`end`:string, id: string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testreserved
+POSTHOOK: query: create table testreserved (data struct<`end`:string, id: string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testreserved
+PREHOOK: query: create view testreservedview as select data.`end` as data_end, data.id as data_id from testreserved
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testreserved
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testreservedview
+POSTHOOK: query: create view testreservedview as select data.`end` as data_end, data.id as data_id from testreserved
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testreserved
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testreservedview
+PREHOOK: query: describe extended testreservedview
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@testreservedview
+POSTHOOK: query: describe extended testreservedview
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@testreservedview
+data_end            	string              	                    
+data_id             	string              	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: select data.`end` from testreserved
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testreserved
+#### A masked pattern was here ####
+POSTHOOK: query: select data.`end` from testreserved
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testreserved
+#### A masked pattern was here ####
+PREHOOK: query: drop view testreservedview
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@testreservedview
+PREHOOK: Output: default@testreservedview
+POSTHOOK: query: drop view testreservedview
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@testreservedview
+POSTHOOK: Output: default@testreservedview
+PREHOOK: query: drop table testreserved
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@testreserved
+PREHOOK: Output: default@testreserved
+POSTHOOK: query: drop table testreserved
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@testreserved
+POSTHOOK: Output: default@testreserved
+PREHOOK: query: create table s (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s
+POSTHOOK: query: create table s (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s
+PREHOOK: query: create view vs1 as select default.src.`end`.key from s
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@s
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vs1
+POSTHOOK: query: create view vs1 as select default.src.`end`.key from s
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@s
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vs1
+PREHOOK: query: describe extended vs1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@vs1
+POSTHOOK: query: describe extended vs1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@vs1
+key                 	string              	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: create view vs2 as select default.src.`end` from s
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@s
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vs2
+POSTHOOK: query: create view vs2 as select default.src.`end` from s
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@s
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vs2
+PREHOOK: query: describe extended vs2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@vs2
+POSTHOOK: query: describe extended vs2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@vs2
+end                 	struct<key:string>  	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: drop view vs1
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@vs1
+PREHOOK: Output: default@vs1
+POSTHOOK: query: drop view vs1
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@vs1
+POSTHOOK: Output: default@vs1
+PREHOOK: query: drop view vs2
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@vs2
+PREHOOK: Output: default@vs2
+POSTHOOK: query: drop view vs2
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@vs2
+POSTHOOK: Output: default@vs2

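The masked pattern in the describe output hides the stored expanded view text, but what these tests verify is that the expansion now writes data.`end` and default.src.`end` rather than the bare reserved keyword. The quoting itself comes from HiveUtils.unparseIdentifier, used in the SemanticAnalyzer change above; a small sketch of the behavior, assuming hive-exec on the classpath and the default hive.support.quoted.identifiers=column:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.HiveUtils;

    public class QuoteDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // Should print `end` -- the backticks keep the reserved keyword
            // parseable when the stored view text is expanded later.
            System.out.println(HiveUtils.unparseIdentifier("end", conf));
        }
    }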

[19/55] [abbrv] hive git commit: HIVE-12230 custom UDF configure() not called in Vectorization mode (Matt McCline, reviewed by Jason Dere)

Posted by xu...@apache.org.
HIVE-12230 custom UDF configure() not called in Vectorization mode (Matt McCline, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/95fcdb55
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/95fcdb55
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/95fcdb55

Branch: refs/heads/spark
Commit: 95fcdb55513e4771f7b387f714043870ef41ce66
Parents: d33ddef
Author: Matt McCline <mm...@hortonworks.com>
Authored: Thu Nov 5 13:16:14 2015 -0800
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Thu Nov 5 13:16:14 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/MapredContext.java      |  2 +-
 .../ql/exec/vector/udf/VectorUDFAdaptor.java    |  5 ++
 .../hive/ql/exec/vector/UDFHelloTest.java       | 69 +++++++++++++++++++
 .../vector_custom_udf_configure.q               | 11 +++
 .../vector_custom_udf_configure.q.out           | 70 ++++++++++++++++++++
 5 files changed, 156 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/95fcdb55/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
index 6ce84ac..b7ed0c1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
@@ -116,7 +116,7 @@ public class MapredContext {
     udfs.clear();
   }
 
-  void setup(GenericUDF genericUDF) {
+  public void setup(GenericUDF genericUDF) {
     if (needConfigure(genericUDF)) {
       genericUDF.configure(this);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/95fcdb55/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
index b397398..d3a0f9f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
@@ -21,6 +21,7 @@ import java.sql.Date;
 import java.sql.Timestamp;
 
 import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.vector.*;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
@@ -84,6 +85,10 @@ public class VectorUDFAdaptor extends VectorExpression {
     for (int i = 0; i < childrenOIs.length; i++) {
       childrenOIs[i] = writers[i].getObjectInspector();
     }
+    MapredContext context = MapredContext.get();
+    if (context != null) {
+      context.setup(genericUDF);
+    }
     outputOI = VectorExpressionWriterFactory.genVectorExpressionWritable(expr)
         .getObjectInspector();
 

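The five lines added to VectorUDFAdaptor above are the actual fix: the adaptor wraps a row-mode GenericUDF for vectorized execution, and before this patch it never ran the configure() hook, so state initialized there (like the greeting in UDFHelloTest below) stayed empty. MapredContext.get() returns the current task's thread-local context and is null outside task execution, hence the guard; that is presumably also why the test that follows sets hive.fetch.task.conversion=none, forcing the query into a real vectorized task instead of a client-side fetch. A condensed sketch of the pattern, assuming hive-exec on the classpath:

    import org.apache.hadoop.hive.ql.exec.MapredContext;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;

    public class ConfigureOnInit {
        // Mirror of the guard added in VectorUDFAdaptor: give the wrapped UDF
        // its configure(MapredContext) callback when a task context exists.
        static void setupIfInTask(GenericUDF wrappedUdf) {
            MapredContext context = MapredContext.get(); // null outside a task
            if (context != null) {
                context.setup(wrappedUdf); // calls wrappedUdf.configure(context) if the UDF wants it
            }
        }
    }
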
http://git-wip-us.apache.org/repos/asf/hive/blob/95fcdb55/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/UDFHelloTest.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/UDFHelloTest.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/UDFHelloTest.java
new file mode 100644
index 0000000..48fb59a
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/UDFHelloTest.java
@@ -0,0 +1,69 @@
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import org.apache.hadoop.hive.ql.exec.MapredContext;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * UDF that prepends "Hello " to its string input.
+ */
+public class UDFHelloTest extends GenericUDF {
+  private static final Logger LOG = LoggerFactory.getLogger(UDFHelloTest.class);
+
+  private Text result = new Text();
+
+  private static String greeting = "";
+
+  private ObjectInspectorConverters.Converter[] converters;
+
+  @Override
+  public Object evaluate(DeferredObject[] arg0) throws HiveException {
+
+    if (arg0.length != 1) {
+      LOG.error("UDFHelloTest expects exactly 1 argument");
+      throw new HiveException("UDFHelloTest expects exactly 1 argument");
+    }
+
+    if (arg0[0].get() == null) {
+      LOG.warn("Empty input");
+      return null;
+    }
+
+    Text data = (Text) converters[0].convert(arg0[0].get());
+
+    String dataString = data.toString();
+
+    result.set(greeting + dataString);
+
+    return result;
+  }
+
+  @Override
+  public String getDisplayString(String[] arg0) {
+    return "Hello...";
+  }
+
+  @Override
+  public void configure(MapredContext context) {
+    greeting = "Hello ";
+  }
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arg0) throws UDFArgumentException {
+    converters = new ObjectInspectorConverters.Converter[arg0.length];
+    for (int i = 0; i < arg0.length; i++) {
+      converters[i] = ObjectInspectorConverters.getConverter(arg0[i],
+              PrimitiveObjectInspectorFactory.writableStringObjectInspector);
+    }
+
+    // evaluate will return a Text object
+    return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/95fcdb55/ql/src/test/queries/clientpositive/vector_custom_udf_configure.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_custom_udf_configure.q b/ql/src/test/queries/clientpositive/vector_custom_udf_configure.q
new file mode 100644
index 0000000..eb19f3a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_custom_udf_configure.q
@@ -0,0 +1,11 @@
+set hive.fetch.task.conversion=none;
+
+create temporary function UDFHelloTest as 'org.apache.hadoop.hive.ql.exec.vector.UDFHelloTest';
+
+create table testorc1(id int, name string) stored as orc;
+insert into table testorc1 values(1, 'a1'), (2,'a2');
+ 
+set hive.vectorized.execution.enabled=true;
+explain
+select id, UDFHelloTest(name) from testorc1;
+select id, UDFHelloTest(name) from testorc1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/95fcdb55/ql/src/test/results/clientpositive/vector_custom_udf_configure.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_custom_udf_configure.q.out b/ql/src/test/results/clientpositive/vector_custom_udf_configure.q.out
new file mode 100644
index 0000000..d529873
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_custom_udf_configure.q.out
@@ -0,0 +1,70 @@
+PREHOOK: query: create temporary function UDFHelloTest as 'org.apache.hadoop.hive.ql.exec.vector.UDFHelloTest'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: udfhellotest
+POSTHOOK: query: create temporary function UDFHelloTest as 'org.apache.hadoop.hive.ql.exec.vector.UDFHelloTest'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: udfhellotest
+PREHOOK: query: create table testorc1(id int, name string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testorc1
+POSTHOOK: query: create table testorc1(id int, name string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testorc1
+PREHOOK: query: insert into table testorc1 values(1, 'a1'), (2,'a2')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@testorc1
+POSTHOOK: query: insert into table testorc1 values(1, 'a1'), (2,'a2')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@testorc1
+POSTHOOK: Lineage: testorc1.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: testorc1.name SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+select id, UDFHelloTest(name) from testorc1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select id, UDFHelloTest(name) from testorc1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: testorc1
+            Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: id (type: int), Hello... (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select id, UDFHelloTest(name) from testorc1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc1
+#### A masked pattern was here ####
+POSTHOOK: query: select id, UDFHelloTest(name) from testorc1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc1
+#### A masked pattern was here ####
+1	Hello a1
+2	Hello a2


[24/55] [abbrv] hive git commit: HIVE-12288: Bloom-1 filters for Vectorized map-joins (Gopal V, reviewed by Matt McCline)

Posted by xu...@apache.org.
HIVE-12288: Bloom-1 filters for Vectorized map-joins (Gopal V, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b29705ed
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b29705ed
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b29705ed

Branch: refs/heads/spark
Commit: b29705ed73a936566a49360a5940c8b933f5e3db
Parents: b5654cc
Author: Gopal V <go...@apache.org>
Authored: Thu Nov 5 22:18:11 2015 -0800
Committer: Gopal V <go...@apache.org>
Committed: Thu Nov 5 22:18:11 2015 -0800

----------------------------------------------------------------------
 .../ql/exec/persistence/HybridHashTableContainer.java    | 11 +++++++++++
 1 file changed, 11 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b29705ed/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index 632ba4f..a0c9b98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -985,6 +985,17 @@ public class HybridHashTableContainer
       int keyHash = HashCodeUtil.murmurHash(bytes, offset, length);
       partitionId = keyHash & (hashPartitions.length - 1);
 
+      if (!bloom1.testLong(keyHash)) {
+        /*
+         * if the keyHash is missing in the bloom filter, then the value cannot exist in any of the
+         * spilled partitions - return NOMATCH
+         */
+        dummyRow = null;
+        aliasFilter = (byte) 0xff;
+        hashMapResult.forget();
+        return JoinResult.NOMATCH;
+      }
+
       // If the target hash table is on disk, spill this row to disk as well to be processed later
       if (isOnDisk(partitionId)) {
         return JoinUtil.JoinResult.SPILL;
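
The early return above is sound because a Bloom filter has no false negatives: when testLong(keyHash) is false, the key was never added on the build side, so no spilled partition can hold a match and the probe is answered without touching disk. For background, a "Bloom-1" filter packs all of a key's bits into a single 64-bit word so that each probe costs one memory access; the following is a minimal illustrative sketch, not the filter implementation Hive uses:

    public final class Bloom1 {
      private final long[] words;  // length must be a power of two
      private final int mask;

      public Bloom1(int numWords) {
        words = new long[numWords];
        mask = numWords - 1;
      }

      // Second, independent hash used only to pick the word.
      private static int rehash(long h) {
        h ^= h >>> 33;
        h *= 0xff51afd7ed558ccdL;
        h ^= h >>> 33;
        return (int) h;
      }

      public void addLong(long hash) {
        long bits = (1L << (hash & 63)) | (1L << ((hash >>> 6) & 63));
        words[rehash(hash) & mask] |= bits;  // both bits land in one word
      }

      // false => definitely never added; true => possibly present.
      public boolean testLong(long hash) {
        long bits = (1L << (hash & 63)) | (1L << ((hash >>> 6) & 63));
        return (words[rehash(hash) & mask] & bits) == bits;
      }
    }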


[31/55] [abbrv] hive git commit: HIVE-12306: fix hbase_queries.q failure (Chaoyu Tang, reviewed by Jimmy Xiang)

Posted by xu...@apache.org.
HIVE-12306: fix hbase_queries.q failure (Chaoyu Tang, reviewed by Jimmy Xiang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/97735ecb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/97735ecb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/97735ecb

Branch: refs/heads/spark
Commit: 97735ecbd1636e18d3b8b18f6fa656774eaabc71
Parents: 58b85ac
Author: ctang <ct...@gmail.com>
Authored: Sat Nov 7 11:23:16 2015 -0500
Committer: ctang <ct...@gmail.com>
Committed: Sat Nov 7 11:23:16 2015 -0500

----------------------------------------------------------------------
 hbase-handler/src/test/results/positive/hbase_queries.q.out | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/97735ecb/hbase-handler/src/test/results/positive/hbase_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index d887566..d044c7e 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -2,14 +2,14 @@ PREHOOK: query: DROP TABLE hbase_table_1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table_1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value') 
+PREHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
 TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value') 
+POSTHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
 TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")


[53/55] [abbrv] hive git commit: HIVE-12208: Vectorized JOIN NPE on dynamically partitioned hash-join + map-join (Gunther Hagleitner, reviewed by Matt McCline)

Posted by xu...@apache.org.
HIVE-12208: Vectorized JOIN NPE on dynamically partitioned hash-join + map-join (Gunther Hagleitner, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6310fc5c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6310fc5c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6310fc5c

Branch: refs/heads/spark
Commit: 6310fc5c8b1115bb202b193c74b9d4755bcbdd57
Parents: da4b1b0
Author: Gunther Hagleitner <gu...@apache.org>
Authored: Wed Nov 11 13:41:26 2015 -0800
Committer: Gunther Hagleitner <gu...@apache.org>
Committed: Wed Nov 11 13:41:44 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |  2 +-
 .../mapjoin/VectorMapJoinCommonOperator.java    | 22 +++++---------------
 2 files changed, 6 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
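
Judging from the diff below, the NPE came from constructing the vectorized hash-table wrapper only inside a loadHashTable() override, a path the dynamically partitioned hash-join does not take. Moving that work into completeInitializationOp(), after super has populated mapJoinTables, covers every load path. A simplified sketch of the resulting shape, with illustrative names rather than the real Hive classes:

    // Base operator records the loaded tables on every initialization path.
    class BaseMapJoin {
      Object[] tables;
      void completeInitializationOp(Object[] os) {
        if (os.length != 0) {
          tables = os;
        }
      }
    }

    // The vectorized subclass no longer hooks the loading step itself; it
    // layers its wrapper over whatever the base class recorded.
    class VectorMapJoin extends BaseMapJoin {
      Object vectorHashTable;
      @Override
      void completeInitializationOp(Object[] os) {
        super.completeInitializationOp(os);  // tables is set before use
        vectorHashTable = wrap(tables[0]);
      }
      private Object wrap(Object t) { return t; }
    }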


http://git-wip-us.apache.org/repos/asf/hive/blob/6310fc5c/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 4af98e5..cab0fc8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -183,7 +183,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
 
   @SuppressWarnings("unchecked")
   @Override
-  protected final void completeInitializationOp(Object[] os) throws HiveException {
+  protected void completeInitializationOp(Object[] os) throws HiveException {
     if (os.length != 0) {
       Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]> pair =
           (Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]>) os[0];

http://git-wip-us.apache.org/repos/asf/hive/blob/6310fc5c/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
index 1667bf7..1d5a9de 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
@@ -614,46 +614,34 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
   }
 
   @Override
-  protected Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]> loadHashTable(
-      ExecMapperContext mapContext, MapredContext mrContext) throws HiveException {
-
-    Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]> pair;
+  protected void completeInitializationOp(Object[] os) throws HiveException {
+    // setup mapJoinTables and serdes
+    super.completeInitializationOp(os);
 
     VectorMapJoinDesc vectorDesc = conf.getVectorDesc();
     HashTableImplementationType hashTableImplementationType = vectorDesc.hashTableImplementationType();
     switch (vectorDesc.hashTableImplementationType()) {
     case OPTIMIZED:
       {
-        // Using Tez's HashTableLoader, create either a MapJoinBytesTableContainer or
-        // HybridHashTableContainer.
-        pair = super.loadHashTable(mapContext, mrContext);
-
         // Create our vector map join optimized hash table variation *above* the
         // map join table container.
-        MapJoinTableContainer[] mapJoinTableContainers = pair.getLeft();
         vectorMapJoinHashTable = VectorMapJoinOptimizedCreateHashTable.createHashTable(conf,
-                mapJoinTableContainers[posSingleVectorMapJoinSmallTable]);
+                mapJoinTables[posSingleVectorMapJoinSmallTable]);
       }
       break;
 
     case FAST:
       {
-        // Use our VectorMapJoinFastHashTableLoader to create a VectorMapJoinTableContainer.
-        pair = super.loadHashTable(mapContext, mrContext);
-
         // Get our vector map join fast hash table variation from the
         // vector map join table container.
-        MapJoinTableContainer[] mapJoinTableContainers = pair.getLeft();
         VectorMapJoinTableContainer vectorMapJoinTableContainer =
-                (VectorMapJoinTableContainer) mapJoinTableContainers[posSingleVectorMapJoinSmallTable];
+                (VectorMapJoinTableContainer) mapJoinTables[posSingleVectorMapJoinSmallTable];
         vectorMapJoinHashTable = vectorMapJoinTableContainer.vectorMapJoinHashTable();
       }
       break;
     default:
       throw new RuntimeException("Unknown vector map join hash table implementation type " + hashTableImplementationType.name());
     }
-
-    return pair;
   }
 
   /*


[25/55] [abbrv] hive git commit: HIVE-12346: Internally used variables in HiveConf should not be settable via command (Chaoyu Tang, reviewed by Xuefu Zhang)

Posted by xu...@apache.org.
HIVE-12346: Internally used variables in HiveConf should not be settable via command (Chaoyu Tang, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2ae1c5cc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2ae1c5cc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2ae1c5cc

Branch: refs/heads/spark
Commit: 2ae1c5cc7b3575b34dff5dbb7605b81bc19cc4b1
Parents: b29705e
Author: ctang <ct...@gmail.com>
Authored: Fri Nov 6 08:51:41 2015 -0500
Committer: ctang <ct...@gmail.com>
Committed: Fri Nov 6 08:51:53 2015 -0500

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/conf/HiveConf.java   | 15 ++++++++++++++-
 .../clientnegative/set_hiveconf_internal_variable0.q |  4 ++++
 .../clientnegative/set_hiveconf_internal_variable1.q |  4 ++++
 .../set_hiveconf_internal_variable0.q.out            | 11 +++++++++++
 .../set_hiveconf_internal_variable1.q.out            | 11 +++++++++++
 5 files changed, 44 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2ae1c5cc/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 98f9206..12276bf 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2104,6 +2104,10 @@ public class HiveConf extends Configuration {
         METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname,
         "Comma separated list of configuration options which should not be read by normal user like passwords"),
 
+    HIVE_CONF_INTERNAL_VARIABLE_LIST("hive.conf.internal.variable.list",
+        "hive.added.files.path,hive.added.jars.path,hive.added.archives.path",
+        "Comma separated list of variables which are used internally and should not be configurable."),
+
     // If this is set all move tasks at the end of a multi-insert query will only begin once all
     // outputs are ready
     HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
@@ -2634,7 +2638,7 @@ public class HiveConf extends Configuration {
     }
     if (restrictList.contains(name)) {
       throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
-          + "of parameters that can't be modified at runtime");
+          + " of parameters that can't be modified at runtime");
     }
     String oldValue = name != null ? get(name) : null;
     if (name == null || value == null || !value.equals(oldValue)) {
@@ -3329,9 +3333,18 @@ public class HiveConf extends Configuration {
         restrictList.add(entry.trim());
       }
     }
+
+    String internalVariableListStr = this.getVar(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST);
+    if (internalVariableListStr != null) {
+      for (String entry : internalVariableListStr.split(",")) {
+        restrictList.add(entry.trim());
+      }
+    }
+
     restrictList.add(ConfVars.HIVE_IN_TEST.varname);
     restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname);
     restrictList.add(ConfVars.HIVE_CONF_HIDDEN_LIST.varname);
+    restrictList.add(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST.varname);
   }
 
   private void setupHiddenSet() {

http://git-wip-us.apache.org/repos/asf/hive/blob/2ae1c5cc/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable0.q b/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable0.q
new file mode 100644
index 0000000..b6393e4
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable0.q
@@ -0,0 +1,4 @@
+-- should fail: some internal variables should not be settable via the set command
+desc src;
+
+set hive.added.jars.path=file://rootdir/test/added/a.jar;

http://git-wip-us.apache.org/repos/asf/hive/blob/2ae1c5cc/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable1.q b/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable1.q
new file mode 100644
index 0000000..0038f36
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable1.q
@@ -0,0 +1,4 @@
+-- should fail: hive.conf.internal.variable.list is in the restricted list
+desc src;
+
+set hive.conf.internal.variable.list=;

http://git-wip-us.apache.org/repos/asf/hive/blob/2ae1c5cc/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out
new file mode 100644
index 0000000..61dafb4
--- /dev/null
+++ b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: -- should fail: some internal variables should not be settable via the set command
+desc src
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src
+POSTHOOK: query: -- should fail: some internal variables should not be settable via the set command
+desc src
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src
+key                 	string              	default             
+value               	string              	default             
+Query returned non-zero code: 1, cause: Cannot modify hive.added.jars.path at runtime. It is in the list of parameters that can't be modified at runtime

http://git-wip-us.apache.org/repos/asf/hive/blob/2ae1c5cc/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out
new file mode 100644
index 0000000..ae2dafb
--- /dev/null
+++ b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: -- should fail: hive.conf.internal.variable.list is in the restricted list
+desc src
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src
+POSTHOOK: query: -- should fail: hive.conf.internal.variable.list is in the restricted list
+desc src
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src
+key                 	string              	default             
+value               	string              	default             
+Query returned non-zero code: 1, cause: Cannot modify hive.conf.internal.variable.list at runtime. It is in the list of parameters that can't be modified at runtime
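
The mechanism exercised above is plain string membership: setupRestrictList() splits each comma-separated config value and trims the entries, and verifyAndSet() rejects any name found in the set, which is exactly the error both negative tests capture. A self-contained sketch of the same idea (a hypothetical class, not HiveConf itself):

    import java.util.HashSet;
    import java.util.Set;

    final class RestrictList {
      private final Set<String> restricted = new HashSet<>();

      // Mirrors setupRestrictList(): split on commas, trim each entry.
      void addAll(String csv) {
        if (csv == null) {
          return;
        }
        for (String entry : csv.split(",")) {
          restricted.add(entry.trim());
        }
      }

      // Mirrors the verifyAndSet() guard that the tests above trip over.
      void checkSettable(String name) {
        if (restricted.contains(name)) {
          throw new IllegalArgumentException("Cannot modify " + name
              + " at runtime. It is in the list of parameters that can't be modified at runtime");
        }
      }
    }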


[54/55] [abbrv] hive git commit: HIVE-12365: Added resource path is sent to cluster as an empty string when externally removed (Chaoyu Tang, reviewed by Xuefu Zhang)

Posted by xu...@apache.org.
HIVE-12365: Added resource path is sent to cluster as an empty string when externally removed (Chaoyu Tang, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/206974a4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/206974a4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/206974a4

Branch: refs/heads/spark
Commit: 206974a49d10a70ddabcdbebc710fca2dc7099fd
Parents: 6310fc5
Author: ctang <ct...@gmail.com>
Authored: Wed Nov 11 18:37:19 2015 -0500
Committer: ctang <ct...@gmail.com>
Committed: Wed Nov 11 18:37:19 2015 -0500

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  7 ++++-
 .../clientpositive/add_jar_with_file_removed.q  | 15 +++++++++++
 .../add_jar_with_file_removed.q.out             | 27 ++++++++++++++++++++
 3 files changed, 48 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/206974a4/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 02adf0c..fc04f18 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2150,7 +2150,12 @@ public final class Utilities {
       List<String> realFiles = new ArrayList<String>(files.size());
       for (String one : files) {
         try {
-          realFiles.add(realFile(one, conf));
+          String onefile = realFile(one, conf);
+          if (onefile != null) {
+            realFiles.add(onefile);
+          } else {
+            LOG.warn("The file " + one + " does not exist.");
+          }
         } catch (IOException e) {
           throw new RuntimeException("Cannot validate file " + one + "due to exception: "
               + e.getMessage(), e);
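
The guard keeps a resource that was added and later deleted from the filesystem from reaching the cluster as an empty string: a path that no longer resolves is dropped with a warning instead of being forwarded. The same pattern in isolation, as a hypothetical helper rather than the Utilities API:

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    final class ResourcePaths {
      // Keep only the paths that still exist; warn about the rest instead
      // of shipping an empty entry to the cluster.
      static List<String> existingOnly(List<String> paths) {
        List<String> real = new ArrayList<>(paths.size());
        for (String p : paths) {
          if (new File(p).exists()) {
            real.add(p);
          } else {
            System.err.println("WARN: The file " + p + " does not exist.");
          }
        }
        return real;
      }
    }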

http://git-wip-us.apache.org/repos/asf/hive/blob/206974a4/ql/src/test/queries/clientpositive/add_jar_with_file_removed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/add_jar_with_file_removed.q b/ql/src/test/queries/clientpositive/add_jar_with_file_removed.q
new file mode 100644
index 0000000..95d7001
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/add_jar_with_file_removed.q
@@ -0,0 +1,15 @@
+!mkdir ${system:test.tmp.dir}/tmpjars;
+!touch ${system:test.tmp.dir}/tmpjars/added1.jar;
+!touch ${system:test.tmp.dir}/tmpjars/added2.jar;
+
+select count(key) from src;
+
+add jar ${system:test.tmp.dir}/tmpjars/added1.jar;
+add jar ${system:test.tmp.dir}/tmpjars/added2.jar;
+
+select count(key) from src;
+
+!rm ${system:test.tmp.dir}/tmpjars/added1.jar;
+
+select count(key) from src;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/206974a4/ql/src/test/results/clientpositive/add_jar_with_file_removed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/add_jar_with_file_removed.q.out b/ql/src/test/results/clientpositive/add_jar_with_file_removed.q.out
new file mode 100644
index 0000000..cb495f4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/add_jar_with_file_removed.q.out
@@ -0,0 +1,27 @@
+PREHOOK: query: select count(key) from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(key) from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(key) from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+500


[09/55] [abbrv] hive git commit: HIVE-12317: Emit current database in lineage info (Jimmy, reviewed by Yongzhi)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/92620d8e/ql/src/test/results/clientpositive/lineage2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out
index 0185d43..aed41b0 100644
--- a/ql/src/test/results/clientpositive/lineage2.q.out
+++ b/ql/src/test/results/clientpositive/lineage2.q.out
@@ -5,12 +5,12 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@src2
-{"version":"1.0","engine":"mr","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 PREHOOK: query: select * from src1 where key is not null and value is not null limit 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"(src1.key is not null and src1.value is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"(src1.key is not null and src1.value is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 238	val_238
 	
 311	val_311
@@ -18,7 +18,7 @@ PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 150	val_150
 213	val_213
@@ -31,17 +31,17 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 PREHOOK: query: insert into table dest1 select * from src2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: select key k, dest1.value from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 238	val_238
 	
 311	val_311
@@ -97,7 +97,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
 
 128
 146
@@ -119,7 +119,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
 
 128
 146
@@ -140,7 +140,7 @@ PREHOOK: query: select key, count(1) a from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(1)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(1)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
 	20
 128	2
 146	2
@@ -161,7 +161,7 @@ PREHOOK: query: select key k, count(*) from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(*)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(*)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
 	20
 128	2
 146	2
@@ -182,7 +182,7 @@ PREHOOK: query: select key k, count(value) from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(default.dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(default.dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 	20
 128	2
 146	2
@@ -203,7 +203,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
 	3
 val_146	3
 val_150	3
@@ -227,7 +227,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value order b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
 	3
 val_146	3
 val_150	3
@@ -237,7 +237,7 @@ PREHOOK: query: select key, length(value) from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 238	7
 	0
 311	7
@@ -292,7 +292,7 @@ PREHOOK: query: select length(value) + 3 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 10
 3
 10
@@ -347,7 +347,7 @@ PREHOOK: query: select 5 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
 5
 5
 5
@@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
 15
 15
 15
@@ -461,31 +461,31 @@ PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.val
 ue2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertex
 Id":"default.src2.value2"}]}
 PREHOOK: query: insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.s
 rc2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN",
 "vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.v
 alue2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vert
 exId":"default.src2.value2"}]}
 PREHOOK: query: insert into table dest2
   select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"i
 d":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"de
 fault.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: select * from src1 where length(key) > 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(length(src1.key) > 2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(length(src1.key) > 2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 238	val_238
 311	val_311
 255	val_255
@@ -503,7 +503,7 @@ PREHOOK: query: select * from src1 where length(key) > 2 and value > 'a'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((length(src1.key) > 2) and (src1.value > 'a'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((length(src1.key) > 2) and (src1.value > 'a'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 238	val_238
 311	val_311
 255	val_255
@@ -523,14 +523,14 @@ PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest3
-{"version":"1.0","engine":"mr","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"
 },{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId"
 :"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert overwrite table dest2
   select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1
 .value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","ve
 rtexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: drop table if exists dest_l1
 PREHOOK: type: DROPTABLE
 PREHOOK: query: CREATE TABLE dest_l1(key INT, value STRING) STORED AS TEXTFILE
@@ -552,7 +552,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
 PREHOOK: Output: default@dest_l1
-{"version":"1.0","engine":"mr","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n      FROM src1 t1\n      LEFT OUTER JOIN src p1\n      ON (t1.key = p1.key)\n      UNION ALL\n      SELECT t2.key, p2.value\n      FROM src1 t2\n      LEFT OUTER JOIN src p2\n      ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(j.key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"j.value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(p1.key = t1.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n      FROM src1 t1\n      LEFT OUTER JOIN src p1\n      ON (t1.key = p1.key)\n      UNION ALL\n      SELECT t2.key, p2.value\n      FROM src1 t2\n      LEFT OUTER JOIN src p2\n      ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(j.key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"j.value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(p1.key = t1.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
 PREHOOK: query: drop table if exists emp
 PREHOOK: type: DROPTABLE
 PREHOOK: query: drop table if exists dept
@@ -593,7 +593,7 @@ PREHOOK: Input: default@dept
 PREHOOK: Input: default@emp
 PREHOOK: Input: default@project
 PREHOOK: Output: default@tgt
-{"version":"1.0","engine":"mr","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n  SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n  FROM (\n    SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n    FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n    ) em\n  JOIN dept d ON d.dept_id = em.dept_id\n  ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"edgeType":"PROJECTION"},{"sources":[8],"targets":[2,3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PREDICATE"},{"sources":[1
 1,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n  SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n  FROM (\n    SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n    FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n    ) em\n  JOIN dept d ON d.dept_id = em.dept_id\n  ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"edgeType":"PROJECTION"},{"sources":[8],"targets":[2,3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PRED
 ICATE"},{"sources":[11,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
 PREHOOK: query: drop table if exists dest_l2
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile
@@ -604,7 +604,7 @@ PREHOOK: query: insert into dest_l2 values(0, 1, 100, 10000)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@dest_l2
-{"version":"1.0","engine":"mr","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(values__tmp__table__1.tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(values__tmp__table__1.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(values__tmp__table__1.tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(values__tmp__table__1.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
 PREHOOK: query: select * from (
   select c1 + c2 x from dest_l2
   union all
@@ -612,7 +612,7 @@ PREHOOK: query: select * from (
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n  select c1 + c2 x from dest_l2\n  union all\n  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"v2.x","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n  select c1 + c2 x from dest_l2\n  union all\n  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"v2.x","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
 101
 10000
 PREHOOK: query: drop table if exists dest_l3
@@ -625,7 +625,7 @@ PREHOOK: query: insert into dest_l3 values(0, "s1", "s2", 15)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__2
 PREHOOK: Output: default@dest_l3
-{"version":"1.0","engine":"mr","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
 PREHOOK: query: select sum(a.c1) over (partition by a.c1 order by a.id)
 from dest_l2 a
 where a.c2 != 10
@@ -634,7 +634,7 @@ having count(a.c2) > 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) id)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.de
 st_l2.id"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) id)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","
 vertexId":"default.dest_l2.id"}]}
 1
 PREHOOK: query: select sum(a.c1), count(b.c1), b.c2, b.c3
 from dest_l2 a join dest_l3 b on (a.id = b.id)
@@ -646,7 +646,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 PREHOOK: Input: default@dest_l3
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[10,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[10],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUM
 N","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[10,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[10],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":
 1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"}]}
 1	1	s2	15
 PREHOOK: query: drop table if exists t
 PREHOOK: type: DROPTABLE
@@ -659,7 +659,7 @@ PREHOOK: Input: default@dest_l2
 PREHOOK: Input: default@dest_l3
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t
-{"version":"1.0","engine":"mr","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
 PREHOOK: query: SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
 concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
 from src1
@@ -667,7 +667,7 @@ GROUP BY substr(src1.key,1,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT substr(src1.value, 5))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT substr(src1.value, 5))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 	7	1543.0
 1	3	1296.0
 2	6	21257.0
@@ -696,7 +696,7 @@ PREHOOK: query: select identity, ep1_id from relations
 PREHOOK: type: QUERY
 PREHOOK: Input: default@relations
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"bb30b94d13d0b35802db85b4e33230b3","queryText":"select identity, ep1_id from relations\n  lateral view explode(ep1_ids) nav_rel as ep1_id","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"nav_rel._col11","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"identity"},{"id":1,"vertexType":"COLUMN","vertexId":"ep1_id"},{"id":2,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":3,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bb30b94d13d0b35802db85b4e33230b3","queryText":"select identity, ep1_id from relations\n  lateral view explode(ep1_ids) nav_rel as ep1_id","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"nav_rel._col11","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"identity"},{"id":1,"vertexType":"COLUMN","vertexId":"ep1_id"},{"id":2,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":3,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"}]}
 PREHOOK: query: insert into rels_exploded select identity, type,
   ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id
 from relations lateral view explode(ep1_ids) rel1 as ep1_id
@@ -704,4 +704,4 @@ from relations lateral view explode(ep1_ids) rel1 as ep1_id
 PREHOOK: type: QUERY
 PREHOOK: Input: default@relations
 PREHOOK: Output: default@rels_exploded
-{"version":"1.0","engine":"mr","hash":"e76d2efade744d1d5cf74fda064ba6c6","queryText":"insert into rels_exploded select identity, type,\n  ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id\nfrom relations lateral view explode(ep1_ids) rel1 as ep1_id\n  lateral view explode (ep2_ids) rel2 as ep2_id","edges":[{"sources":[8],"targets":[0],"edgeType":"PROJECTION"},{"sources":[9],"targets":[1],"edgeType":"PROJECTION"},{"sources":[10],"targets":[2],"edgeType":"PROJECTION"},{"sources":[11],"targets":[3],"edgeType":"PROJECTION"},{"sources":[12],"targets":[4],"edgeType":"PROJECTION"},{"sources":[13],"targets":[5],"edgeType":"PROJECTION"},{"sources":[14],"targets":[6],"expression":"CAST( rel1._col11 AS CHAR(32)","edgeType":"PROJECTION"},{"sources":[15],"targets":[7],"expression":"CAST( rel2._col12 AS CHAR(32)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.rels_exploded.identity"},{"id":1,"vertexType":"COLUMN","vertexId":"default.rels_explo
 ded.type"},{"id":2,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_src_type"},{"id":3,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_type"},{"id":4,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_src_type"},{"id":5,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_type"},{"id":6,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_id"},{"id":7,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_id"},{"id":8,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":9,"vertexType":"COLUMN","vertexId":"default.relations.type"},{"id":10,"vertexType":"COLUMN","vertexId":"default.relations.ep1_src_type"},{"id":11,"vertexType":"COLUMN","vertexId":"default.relations.ep1_type"},{"id":12,"vertexType":"COLUMN","vertexId":"default.relations.ep2_src_type"},{"id":13,"vertexType":"COLUMN","vertexId":"default.relations.ep2_type"},{"id":14,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"},{"id":15,"vertexType":"COLU
 MN","vertexId":"default.relations.ep2_ids"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e76d2efade744d1d5cf74fda064ba6c6","queryText":"insert into rels_exploded select identity, type,\n  ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id\nfrom relations lateral view explode(ep1_ids) rel1 as ep1_id\n  lateral view explode (ep2_ids) rel2 as ep2_id","edges":[{"sources":[8],"targets":[0],"edgeType":"PROJECTION"},{"sources":[9],"targets":[1],"edgeType":"PROJECTION"},{"sources":[10],"targets":[2],"edgeType":"PROJECTION"},{"sources":[11],"targets":[3],"edgeType":"PROJECTION"},{"sources":[12],"targets":[4],"edgeType":"PROJECTION"},{"sources":[13],"targets":[5],"edgeType":"PROJECTION"},{"sources":[14],"targets":[6],"expression":"CAST( rel1._col11 AS CHAR(32)","edgeType":"PROJECTION"},{"sources":[15],"targets":[7],"expression":"CAST( rel2._col12 AS CHAR(32)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.rels_exploded.identity"},{"id":1,"vertexType":"COLUMN","vertexId
 ":"default.rels_exploded.type"},{"id":2,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_src_type"},{"id":3,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_type"},{"id":4,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_src_type"},{"id":5,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_type"},{"id":6,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_id"},{"id":7,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_id"},{"id":8,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":9,"vertexType":"COLUMN","vertexId":"default.relations.type"},{"id":10,"vertexType":"COLUMN","vertexId":"default.relations.ep1_src_type"},{"id":11,"vertexType":"COLUMN","vertexId":"default.relations.ep1_type"},{"id":12,"vertexType":"COLUMN","vertexId":"default.relations.ep2_src_type"},{"id":13,"vertexType":"COLUMN","vertexId":"default.relations.ep2_type"},{"id":14,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"},{"id":
 15,"vertexType":"COLUMN","vertexId":"default.relations.ep2_ids"}]}


[20/55] [abbrv] hive git commit: HIVE-12164 : non-ASCII characters show up improperly with insert into (Aleksei Statkevich via Xuefu Zhang)

Posted by xu...@apache.org.
HIVE-12164 : non-ASCII characters show up improperly with insert into (Aleksei Statkevich via Xuefu Zhang)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d06b69f5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d06b69f5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d06b69f5

Branch: refs/heads/spark
Commit: d06b69f57624cd6b6bfafd8e28512b6e8ae03b6a
Parents: 95fcdb5
Author: Aleksei Statkevich <me...@gmail.com>
Authored: Mon Oct 19 22:37:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Nov 5 13:54:53 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 16 ++++++++---
 .../clientpositive/insert_values_nonascii.q     |  9 +++++++
 .../clientpositive/insert_values_nonascii.q.out | 28 ++++++++++++++++++++
 3 files changed, 50 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d06b69f5/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f3d7057..f7e2039 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -216,6 +216,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.OutputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -733,6 +734,15 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   /**
+   * Convert a string to Text format and write its bytes in the same way TextOutputFormat would do.
+   * This is needed to properly encode non-ascii characters.
+   */
+  private static void writeAsText(String text, FSDataOutputStream out) throws IOException {
+    Text to = new Text(text);
+    out.write(to.getBytes(), 0, to.getLength());
+  }
+
+  /**
    * Generate a temp table out of a value clause
    * See also {@link #preProcessForInsert(ASTNode, QB)}
    */
@@ -810,10 +820,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             fields.add(new FieldSchema("tmp_values_col" + nextColNum++, "string", ""));
           }
           if (isFirst) isFirst = false;
-          else out.writeBytes("\u0001");
-          out.writeBytes(unparseExprForValuesClause(value));
+          else writeAsText("\u0001", out);
+          writeAsText(unparseExprForValuesClause(value), out);
         }
-        out.writeBytes("\n");
+        writeAsText("\n", out);
         firstRow = false;
       }
       out.close();
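
For context on why writeAsText is needed: FSDataOutputStream inherits DataOutputStream.writeBytes(String), which keeps only the low-order byte of each char, so the bytes it emits for non-Latin text are never valid UTF-8; routing the string through org.apache.hadoop.io.Text re-encodes it as UTF-8, the same bytes TextOutputFormat would produce. A minimal standalone sketch of the difference (assumes hadoop-common on the classpath; this demo is not part of the patch):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.io.Text;

public class WriteBytesVsText {
  public static void main(String[] args) throws Exception {
    String s = "Garçu 谢谢"; // 8 chars; 13 bytes in UTF-8

    // DataOutputStream.writeBytes discards the high byte of every char,
    // emitting exactly one (possibly corrupted) byte per character.
    ByteArrayOutputStream lossy = new ByteArrayOutputStream();
    new DataOutputStream(lossy).writeBytes(s);
    System.out.println("writeBytes wrote " + lossy.size() + " bytes"); // 8, lossy

    // Text holds the UTF-8 encoding, so all characters round-trip intact.
    Text t = new Text(s);
    System.out.println("Text holds " + t.getLength() + " bytes");      // 13, correct
  }
}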

http://git-wip-us.apache.org/repos/asf/hive/blob/d06b69f5/ql/src/test/queries/clientpositive/insert_values_nonascii.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_nonascii.q b/ql/src/test/queries/clientpositive/insert_values_nonascii.q
new file mode 100644
index 0000000..2e4ef41
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_values_nonascii.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+create table insert_values_nonascii(t1 char(32), t2 string);
+
+insert into insert_values_nonascii values("Абвгде Garçu 谢谢",  "Kôkaku ありがとう"), ("ございます", "kidôtai한국어");
+
+select * from insert_values_nonascii;

http://git-wip-us.apache.org/repos/asf/hive/blob/d06b69f5/ql/src/test/results/clientpositive/insert_values_nonascii.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_values_nonascii.q.out b/ql/src/test/results/clientpositive/insert_values_nonascii.q.out
new file mode 100644
index 0000000..ca07bef
--- /dev/null
+++ b/ql/src/test/results/clientpositive/insert_values_nonascii.q.out
@@ -0,0 +1,28 @@
+PREHOOK: query: create table insert_values_nonascii(t1 char(32), t2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@insert_values_nonascii
+POSTHOOK: query: create table insert_values_nonascii(t1 char(32), t2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@insert_values_nonascii
+PREHOOK: query: insert into insert_values_nonascii values("Абвгде Garçu 谢谢",  "Kôkaku ありがとう"), ("ございます", "kidôtai한국어")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@insert_values_nonascii
+POSTHOOK: query: insert into insert_values_nonascii values("Абвгде Garçu 谢谢",  "Kôkaku ありがとう"), ("ございます", "kidôtai한국어")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@insert_values_nonascii
+POSTHOOK: Lineage: insert_values_nonascii.t1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: insert_values_nonascii.t2 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: select * from insert_values_nonascii
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_values_nonascii
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_values_nonascii
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_values_nonascii
+#### A masked pattern was here ####
+Абвгде Garçu 谢谢                 	Kôkaku ありがとう
+ございます                           	kidôtai한국어


[51/55] [abbrv] hive git commit: HIVE-12363: Incorrect results with orc ppd across ORC versions (Gopal V, reviewed by Prasanth Jayachandran)

Posted by xu...@apache.org.
HIVE-12363: Incorrect results with orc ppd across ORC versions (Gopal V, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1d5da097
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1d5da097
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1d5da097

Branch: refs/heads/spark
Commit: 1d5da097a6b0554894f13840542ca96917c9d9d2
Parents: 4f7f882
Author: Gopal V <go...@apache.org>
Authored: Wed Nov 11 06:37:04 2015 -0800
Committer: Gopal V <go...@apache.org>
Committed: Wed Nov 11 06:37:34 2015 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1d5da097/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index 0696277..04b9eaf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.io.Text;
 public class RecordReaderImpl implements RecordReader {
   static final Logger LOG = LoggerFactory.getLogger(RecordReaderImpl.class);
   private static final boolean isLogDebugEnabled = LOG.isDebugEnabled();
+  private static final Object UNKNOWN_VALUE = new Object();
   private final Path path;
   private final long firstRow;
   private final List<StripeInformation> stripes =
@@ -310,7 +311,7 @@ public class RecordReaderImpl implements RecordReader {
         return Boolean.TRUE;
       }
     } else {
-      return null;
+      return UNKNOWN_VALUE; // null is not safe here
     }
   }
 
@@ -359,6 +360,8 @@ public class RecordReaderImpl implements RecordReader {
       } else {
         return TruthValue.NULL;
       }
+    } else if (min == UNKNOWN_VALUE) {
+      return TruthValue.YES_NO_NULL;
     }
 
     TruthValue result;
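
The fix above is the private-sentinel idiom: the statistics getter previously returned null both for "the minimum really is null" and for "the statistic could not be interpreted" (e.g. stats written by a different ORC version), and the two cases demand opposite answers from predicate pushdown. A reserved object no caller can ever construct disambiguates them, and an identity check (==) routes the unknown case to TruthValue.YES_NO_NULL so the row group is read rather than wrongly skipped. A minimal sketch of the idiom (illustrative only; every name except UNKNOWN_VALUE is invented):

public class SentinelIdiom {
  // Private sentinel: distinct from null and from any value a caller can make.
  private static final Object UNKNOWN_VALUE = new Object();

  // Hypothetical stand-in for reading a min statistic: null means "min is
  // genuinely null"; UNKNOWN_VALUE means "stat present but not interpretable".
  static Object readMin(boolean interpretable, Object realMin) {
    return interpretable ? realMin : UNKNOWN_VALUE;
  }

  static String evaluatePredicate(Object min) {
    if (min == UNKNOWN_VALUE) {
      // Identity comparison is safe: no other code path can create this object.
      return "YES_NO_NULL"; // cannot prune; must read the row group
    }
    return (min == null) ? "NULL" : "compare against " + min;
  }

  public static void main(String[] args) {
    System.out.println(evaluatePredicate(readMin(false, null))); // YES_NO_NULL
    System.out.println(evaluatePredicate(readMin(true, null)));  // NULL
    System.out.println(evaluatePredicate(readMin(true, 42)));    // compare against 42
  }
}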


[42/55] [abbrv] hive git commit: HIVE-7575 GetTables thrift call is very slow (Navis via Aihua Xu, reviewed by Szehon Ho, Aihua Xu)

Posted by xu...@apache.org.
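
This change introduces a single get_table_meta(db_patterns, tbl_patterns, tbl_types) metastore call returning TableMeta structs, so a client's GetTables request becomes one round trip over all matching databases instead of one get_tables call per database. A minimal sketch of the Java-side usage (assuming the IMetaStoreClient.getTableMeta counterpart added by this change; metastore connection details elided):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.TableMeta;

public class GetTableMetaSketch {
  public static void main(String[] args) throws Exception {
    // Assumes a hive-site.xml on the classpath pointing at a running metastore.
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // One call covers every matching database and table in a single round trip.
      List<TableMeta> metas =
          client.getTableMeta("*", "*", Arrays.asList("MANAGED_TABLE", "VIRTUAL_VIEW"));
      for (TableMeta m : metas) {
        System.out.println(m.getDbName() + "." + m.getTableName() + "\t" + m.getTableType());
      }
    } finally {
      client.close();
    }
  }
}

The generated PHP client below exposes the same call as get_table_meta($db_patterns, $tbl_patterns, array $tbl_types).
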
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index a6862be..34c2205 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -181,6 +181,14 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    */
   public function get_tables($db_name, $pattern);
   /**
+   * @param string $db_patterns
+   * @param string $tbl_patterns
+   * @param string[] $tbl_types
+   * @return \metastore\TableMeta[]
+   * @throws \metastore\MetaException
+   */
+  public function get_table_meta($db_patterns, $tbl_patterns, array $tbl_types);
+  /**
    * @param string $db_name
    * @return string[]
    * @throws \metastore\MetaException
@@ -2254,6 +2262,62 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     throw new \Exception("get_tables failed: unknown result");
   }
 
+  public function get_table_meta($db_patterns, $tbl_patterns, array $tbl_types)
+  {
+    $this->send_get_table_meta($db_patterns, $tbl_patterns, $tbl_types);
+    return $this->recv_get_table_meta();
+  }
+
+  public function send_get_table_meta($db_patterns, $tbl_patterns, array $tbl_types)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_table_meta_args();
+    $args->db_patterns = $db_patterns;
+    $args->tbl_patterns = $tbl_patterns;
+    $args->tbl_types = $tbl_types;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_table_meta', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_table_meta', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_table_meta()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_table_meta_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_table_meta_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    throw new \Exception("get_table_meta failed: unknown result");
+  }
+
   public function get_all_tables($db_name)
   {
     $this->send_get_all_tables($db_name);
@@ -13287,6 +13351,281 @@ class ThriftHiveMetastore_get_tables_result {
 
 }
 
+class ThriftHiveMetastore_get_table_meta_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $db_patterns = null;
+  /**
+   * @var string
+   */
+  public $tbl_patterns = null;
+  /**
+   * @var string[]
+   */
+  public $tbl_types = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'db_patterns',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'tbl_patterns',
+          'type' => TType::STRING,
+          ),
+        3 => array(
+          'var' => 'tbl_types',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['db_patterns'])) {
+        $this->db_patterns = $vals['db_patterns'];
+      }
+      if (isset($vals['tbl_patterns'])) {
+        $this->tbl_patterns = $vals['tbl_patterns'];
+      }
+      if (isset($vals['tbl_types'])) {
+        $this->tbl_types = $vals['tbl_types'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_table_meta_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->db_patterns);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->tbl_patterns);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::LST) {
+            $this->tbl_types = array();
+            $_size583 = 0;
+            $_etype586 = 0;
+            $xfer += $input->readListBegin($_etype586, $_size583);
+            for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
+            {
+              $elem588 = null;
+              $xfer += $input->readString($elem588);
+              $this->tbl_types []= $elem588;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_table_meta_args');
+    if ($this->db_patterns !== null) {
+      $xfer += $output->writeFieldBegin('db_patterns', TType::STRING, 1);
+      $xfer += $output->writeString($this->db_patterns);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->tbl_patterns !== null) {
+      $xfer += $output->writeFieldBegin('tbl_patterns', TType::STRING, 2);
+      $xfer += $output->writeString($this->tbl_patterns);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->tbl_types !== null) {
+      if (!is_array($this->tbl_types)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('tbl_types', TType::LST, 3);
+      {
+        $output->writeListBegin(TType::STRING, count($this->tbl_types));
+        {
+          foreach ($this->tbl_types as $iter589)
+          {
+            $xfer += $output->writeString($iter589);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_table_meta_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\TableMeta[]
+   */
+  public $success = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::LST,
+          'etype' => TType::STRUCT,
+          'elem' => array(
+            'type' => TType::STRUCT,
+            'class' => '\metastore\TableMeta',
+            ),
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_table_meta_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::LST) {
+            $this->success = array();
+            $_size590 = 0;
+            $_etype593 = 0;
+            $xfer += $input->readListBegin($_etype593, $_size590);
+            for ($_i594 = 0; $_i594 < $_size590; ++$_i594)
+            {
+              $elem595 = null;
+              $elem595 = new \metastore\TableMeta();
+              $xfer += $elem595->read($input);
+              $this->success []= $elem595;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_table_meta_result');
+    if ($this->success !== null) {
+      if (!is_array($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+      {
+        $output->writeListBegin(TType::STRUCT, count($this->success));
+        {
+          foreach ($this->success as $iter596)
+          {
+            $xfer += $iter596->write($output);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class ThriftHiveMetastore_get_all_tables_args {
   static $_TSPEC;
 
@@ -13424,14 +13763,14 @@ class ThriftHiveMetastore_get_all_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size583 = 0;
-            $_etype586 = 0;
-            $xfer += $input->readListBegin($_etype586, $_size583);
-            for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
+            $_size597 = 0;
+            $_etype600 = 0;
+            $xfer += $input->readListBegin($_etype600, $_size597);
+            for ($_i601 = 0; $_i601 < $_size597; ++$_i601)
             {
-              $elem588 = null;
-              $xfer += $input->readString($elem588);
-              $this->success []= $elem588;
+              $elem602 = null;
+              $xfer += $input->readString($elem602);
+              $this->success []= $elem602;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13467,9 +13806,9 @@ class ThriftHiveMetastore_get_all_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter589)
+          foreach ($this->success as $iter603)
           {
-            $xfer += $output->writeString($iter589);
+            $xfer += $output->writeString($iter603);
           }
         }
         $output->writeListEnd();
@@ -13784,14 +14123,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->tbl_names = array();
-            $_size590 = 0;
-            $_etype593 = 0;
-            $xfer += $input->readListBegin($_etype593, $_size590);
-            for ($_i594 = 0; $_i594 < $_size590; ++$_i594)
+            $_size604 = 0;
+            $_etype607 = 0;
+            $xfer += $input->readListBegin($_etype607, $_size604);
+            for ($_i608 = 0; $_i608 < $_size604; ++$_i608)
             {
-              $elem595 = null;
-              $xfer += $input->readString($elem595);
-              $this->tbl_names []= $elem595;
+              $elem609 = null;
+              $xfer += $input->readString($elem609);
+              $this->tbl_names []= $elem609;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13824,9 +14163,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_names));
         {
-          foreach ($this->tbl_names as $iter596)
+          foreach ($this->tbl_names as $iter610)
           {
-            $xfer += $output->writeString($iter596);
+            $xfer += $output->writeString($iter610);
           }
         }
         $output->writeListEnd();
@@ -13927,15 +14266,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size597 = 0;
-            $_etype600 = 0;
-            $xfer += $input->readListBegin($_etype600, $_size597);
-            for ($_i601 = 0; $_i601 < $_size597; ++$_i601)
+            $_size611 = 0;
+            $_etype614 = 0;
+            $xfer += $input->readListBegin($_etype614, $_size611);
+            for ($_i615 = 0; $_i615 < $_size611; ++$_i615)
             {
-              $elem602 = null;
-              $elem602 = new \metastore\Table();
-              $xfer += $elem602->read($input);
-              $this->success []= $elem602;
+              $elem616 = null;
+              $elem616 = new \metastore\Table();
+              $xfer += $elem616->read($input);
+              $this->success []= $elem616;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13987,9 +14326,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter603)
+          foreach ($this->success as $iter617)
           {
-            $xfer += $iter603->write($output);
+            $xfer += $iter617->write($output);
           }
         }
         $output->writeListEnd();
@@ -14225,14 +14564,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size604 = 0;
-            $_etype607 = 0;
-            $xfer += $input->readListBegin($_etype607, $_size604);
-            for ($_i608 = 0; $_i608 < $_size604; ++$_i608)
+            $_size618 = 0;
+            $_etype621 = 0;
+            $xfer += $input->readListBegin($_etype621, $_size618);
+            for ($_i622 = 0; $_i622 < $_size618; ++$_i622)
             {
-              $elem609 = null;
-              $xfer += $input->readString($elem609);
-              $this->success []= $elem609;
+              $elem623 = null;
+              $xfer += $input->readString($elem623);
+              $this->success []= $elem623;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -14284,9 +14623,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter610)
+          foreach ($this->success as $iter624)
           {
-            $xfer += $output->writeString($iter610);
+            $xfer += $output->writeString($iter624);
           }
         }
         $output->writeListEnd();
@@ -15599,15 +15938,15 @@ class ThriftHiveMetastore_add_partitions_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size611 = 0;
-            $_etype614 = 0;
-            $xfer += $input->readListBegin($_etype614, $_size611);
-            for ($_i615 = 0; $_i615 < $_size611; ++$_i615)
+            $_size625 = 0;
+            $_etype628 = 0;
+            $xfer += $input->readListBegin($_etype628, $_size625);
+            for ($_i629 = 0; $_i629 < $_size625; ++$_i629)
             {
-              $elem616 = null;
-              $elem616 = new \metastore\Partition();
-              $xfer += $elem616->read($input);
-              $this->new_parts []= $elem616;
+              $elem630 = null;
+              $elem630 = new \metastore\Partition();
+              $xfer += $elem630->read($input);
+              $this->new_parts []= $elem630;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15635,9 +15974,9 @@ class ThriftHiveMetastore_add_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter617)
+          foreach ($this->new_parts as $iter631)
           {
-            $xfer += $iter617->write($output);
+            $xfer += $iter631->write($output);
           }
         }
         $output->writeListEnd();
@@ -15852,15 +16191,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size618 = 0;
-            $_etype621 = 0;
-            $xfer += $input->readListBegin($_etype621, $_size618);
-            for ($_i622 = 0; $_i622 < $_size618; ++$_i622)
+            $_size632 = 0;
+            $_etype635 = 0;
+            $xfer += $input->readListBegin($_etype635, $_size632);
+            for ($_i636 = 0; $_i636 < $_size632; ++$_i636)
             {
-              $elem623 = null;
-              $elem623 = new \metastore\PartitionSpec();
-              $xfer += $elem623->read($input);
-              $this->new_parts []= $elem623;
+              $elem637 = null;
+              $elem637 = new \metastore\PartitionSpec();
+              $xfer += $elem637->read($input);
+              $this->new_parts []= $elem637;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15888,9 +16227,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter624)
+          foreach ($this->new_parts as $iter638)
           {
-            $xfer += $iter624->write($output);
+            $xfer += $iter638->write($output);
           }
         }
         $output->writeListEnd();
@@ -16140,14 +16479,14 @@ class ThriftHiveMetastore_append_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size625 = 0;
-            $_etype628 = 0;
-            $xfer += $input->readListBegin($_etype628, $_size625);
-            for ($_i629 = 0; $_i629 < $_size625; ++$_i629)
+            $_size639 = 0;
+            $_etype642 = 0;
+            $xfer += $input->readListBegin($_etype642, $_size639);
+            for ($_i643 = 0; $_i643 < $_size639; ++$_i643)
             {
-              $elem630 = null;
-              $xfer += $input->readString($elem630);
-              $this->part_vals []= $elem630;
+              $elem644 = null;
+              $xfer += $input->readString($elem644);
+              $this->part_vals []= $elem644;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16185,9 +16524,9 @@ class ThriftHiveMetastore_append_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter631)
+          foreach ($this->part_vals as $iter645)
           {
-            $xfer += $output->writeString($iter631);
+            $xfer += $output->writeString($iter645);
           }
         }
         $output->writeListEnd();
@@ -16689,14 +17028,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size632 = 0;
-            $_etype635 = 0;
-            $xfer += $input->readListBegin($_etype635, $_size632);
-            for ($_i636 = 0; $_i636 < $_size632; ++$_i636)
+            $_size646 = 0;
+            $_etype649 = 0;
+            $xfer += $input->readListBegin($_etype649, $_size646);
+            for ($_i650 = 0; $_i650 < $_size646; ++$_i650)
             {
-              $elem637 = null;
-              $xfer += $input->readString($elem637);
-              $this->part_vals []= $elem637;
+              $elem651 = null;
+              $xfer += $input->readString($elem651);
+              $this->part_vals []= $elem651;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16742,9 +17081,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter638)
+          foreach ($this->part_vals as $iter652)
           {
-            $xfer += $output->writeString($iter638);
+            $xfer += $output->writeString($iter652);
           }
         }
         $output->writeListEnd();
@@ -17598,14 +17937,14 @@ class ThriftHiveMetastore_drop_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size639 = 0;
-            $_etype642 = 0;
-            $xfer += $input->readListBegin($_etype642, $_size639);
-            for ($_i643 = 0; $_i643 < $_size639; ++$_i643)
+            $_size653 = 0;
+            $_etype656 = 0;
+            $xfer += $input->readListBegin($_etype656, $_size653);
+            for ($_i657 = 0; $_i657 < $_size653; ++$_i657)
             {
-              $elem644 = null;
-              $xfer += $input->readString($elem644);
-              $this->part_vals []= $elem644;
+              $elem658 = null;
+              $xfer += $input->readString($elem658);
+              $this->part_vals []= $elem658;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17650,9 +17989,9 @@ class ThriftHiveMetastore_drop_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter645)
+          foreach ($this->part_vals as $iter659)
           {
-            $xfer += $output->writeString($iter645);
+            $xfer += $output->writeString($iter659);
           }
         }
         $output->writeListEnd();
@@ -17905,14 +18244,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size646 = 0;
-            $_etype649 = 0;
-            $xfer += $input->readListBegin($_etype649, $_size646);
-            for ($_i650 = 0; $_i650 < $_size646; ++$_i650)
+            $_size660 = 0;
+            $_etype663 = 0;
+            $xfer += $input->readListBegin($_etype663, $_size660);
+            for ($_i664 = 0; $_i664 < $_size660; ++$_i664)
             {
-              $elem651 = null;
-              $xfer += $input->readString($elem651);
-              $this->part_vals []= $elem651;
+              $elem665 = null;
+              $xfer += $input->readString($elem665);
+              $this->part_vals []= $elem665;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17965,9 +18304,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter652)
+          foreach ($this->part_vals as $iter666)
           {
-            $xfer += $output->writeString($iter652);
+            $xfer += $output->writeString($iter666);
           }
         }
         $output->writeListEnd();
@@ -18981,14 +19320,14 @@ class ThriftHiveMetastore_get_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size653 = 0;
-            $_etype656 = 0;
-            $xfer += $input->readListBegin($_etype656, $_size653);
-            for ($_i657 = 0; $_i657 < $_size653; ++$_i657)
+            $_size667 = 0;
+            $_etype670 = 0;
+            $xfer += $input->readListBegin($_etype670, $_size667);
+            for ($_i671 = 0; $_i671 < $_size667; ++$_i671)
             {
-              $elem658 = null;
-              $xfer += $input->readString($elem658);
-              $this->part_vals []= $elem658;
+              $elem672 = null;
+              $xfer += $input->readString($elem672);
+              $this->part_vals []= $elem672;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19026,9 +19365,9 @@ class ThriftHiveMetastore_get_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter659)
+          foreach ($this->part_vals as $iter673)
           {
-            $xfer += $output->writeString($iter659);
+            $xfer += $output->writeString($iter673);
           }
         }
         $output->writeListEnd();
@@ -19270,17 +19609,17 @@ class ThriftHiveMetastore_exchange_partition_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size660 = 0;
-            $_ktype661 = 0;
-            $_vtype662 = 0;
-            $xfer += $input->readMapBegin($_ktype661, $_vtype662, $_size660);
-            for ($_i664 = 0; $_i664 < $_size660; ++$_i664)
+            $_size674 = 0;
+            $_ktype675 = 0;
+            $_vtype676 = 0;
+            $xfer += $input->readMapBegin($_ktype675, $_vtype676, $_size674);
+            for ($_i678 = 0; $_i678 < $_size674; ++$_i678)
             {
-              $key665 = '';
-              $val666 = '';
-              $xfer += $input->readString($key665);
-              $xfer += $input->readString($val666);
-              $this->partitionSpecs[$key665] = $val666;
+              $key679 = '';
+              $val680 = '';
+              $xfer += $input->readString($key679);
+              $xfer += $input->readString($val680);
+              $this->partitionSpecs[$key679] = $val680;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -19336,10 +19675,10 @@ class ThriftHiveMetastore_exchange_partition_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter667 => $viter668)
+          foreach ($this->partitionSpecs as $kiter681 => $viter682)
           {
-            $xfer += $output->writeString($kiter667);
-            $xfer += $output->writeString($viter668);
+            $xfer += $output->writeString($kiter681);
+            $xfer += $output->writeString($viter682);
           }
         }
         $output->writeMapEnd();
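
Maps such as partitionSpecs draw paired key/value temporaries ($_ktype and $_vtype, $key/$val on read, $kiter/$viter on write) from the same counter. A simplified sketch of the string-to-string round trip, with normalized names:

    // read side: recover key type, value type, and entry count, then read each pair
    $this->partitionSpecs = array();
    $size = 0;
    $ktype = 0;
    $vtype = 0;
    $xfer += $input->readMapBegin($ktype, $vtype, $size);
    for ($i = 0; $i < $size; ++$i)
    {
      $key = '';
      $val = '';
      $xfer += $input->readString($key);
      $xfer += $input->readString($val);
      $this->partitionSpecs[$key] = $val;
    }
    $xfer += $input->readMapEnd();

    // write side: declare the key/value wire types and count, then write each pair
    $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
    foreach ($this->partitionSpecs as $k => $v)
    {
      $xfer += $output->writeString($k);
      $xfer += $output->writeString($v);
    }
    $output->writeMapEnd();
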
@@ -19651,17 +19990,17 @@ class ThriftHiveMetastore_exchange_partitions_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size669 = 0;
-            $_ktype670 = 0;
-            $_vtype671 = 0;
-            $xfer += $input->readMapBegin($_ktype670, $_vtype671, $_size669);
-            for ($_i673 = 0; $_i673 < $_size669; ++$_i673)
+            $_size683 = 0;
+            $_ktype684 = 0;
+            $_vtype685 = 0;
+            $xfer += $input->readMapBegin($_ktype684, $_vtype685, $_size683);
+            for ($_i687 = 0; $_i687 < $_size683; ++$_i687)
             {
-              $key674 = '';
-              $val675 = '';
-              $xfer += $input->readString($key674);
-              $xfer += $input->readString($val675);
-              $this->partitionSpecs[$key674] = $val675;
+              $key688 = '';
+              $val689 = '';
+              $xfer += $input->readString($key688);
+              $xfer += $input->readString($val689);
+              $this->partitionSpecs[$key688] = $val689;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -19717,10 +20056,10 @@ class ThriftHiveMetastore_exchange_partitions_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter676 => $viter677)
+          foreach ($this->partitionSpecs as $kiter690 => $viter691)
           {
-            $xfer += $output->writeString($kiter676);
-            $xfer += $output->writeString($viter677);
+            $xfer += $output->writeString($kiter690);
+            $xfer += $output->writeString($viter691);
           }
         }
         $output->writeMapEnd();
@@ -19853,15 +20192,15 @@ class ThriftHiveMetastore_exchange_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size678 = 0;
-            $_etype681 = 0;
-            $xfer += $input->readListBegin($_etype681, $_size678);
-            for ($_i682 = 0; $_i682 < $_size678; ++$_i682)
+            $_size692 = 0;
+            $_etype695 = 0;
+            $xfer += $input->readListBegin($_etype695, $_size692);
+            for ($_i696 = 0; $_i696 < $_size692; ++$_i696)
             {
-              $elem683 = null;
-              $elem683 = new \metastore\Partition();
-              $xfer += $elem683->read($input);
-              $this->success []= $elem683;
+              $elem697 = null;
+              $elem697 = new \metastore\Partition();
+              $xfer += $elem697->read($input);
+              $this->success []= $elem697;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19921,9 +20260,9 @@ class ThriftHiveMetastore_exchange_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter684)
+          foreach ($this->success as $iter698)
           {
-            $xfer += $iter684->write($output);
+            $xfer += $iter698->write($output);
           }
         }
         $output->writeListEnd();
@@ -20069,14 +20408,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size685 = 0;
-            $_etype688 = 0;
-            $xfer += $input->readListBegin($_etype688, $_size685);
-            for ($_i689 = 0; $_i689 < $_size685; ++$_i689)
+            $_size699 = 0;
+            $_etype702 = 0;
+            $xfer += $input->readListBegin($_etype702, $_size699);
+            for ($_i703 = 0; $_i703 < $_size699; ++$_i703)
             {
-              $elem690 = null;
-              $xfer += $input->readString($elem690);
-              $this->part_vals []= $elem690;
+              $elem704 = null;
+              $xfer += $input->readString($elem704);
+              $this->part_vals []= $elem704;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20093,14 +20432,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size691 = 0;
-            $_etype694 = 0;
-            $xfer += $input->readListBegin($_etype694, $_size691);
-            for ($_i695 = 0; $_i695 < $_size691; ++$_i695)
+            $_size705 = 0;
+            $_etype708 = 0;
+            $xfer += $input->readListBegin($_etype708, $_size705);
+            for ($_i709 = 0; $_i709 < $_size705; ++$_i709)
             {
-              $elem696 = null;
-              $xfer += $input->readString($elem696);
-              $this->group_names []= $elem696;
+              $elem710 = null;
+              $xfer += $input->readString($elem710);
+              $this->group_names []= $elem710;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20138,9 +20477,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter697)
+          foreach ($this->part_vals as $iter711)
           {
-            $xfer += $output->writeString($iter697);
+            $xfer += $output->writeString($iter711);
           }
         }
         $output->writeListEnd();
@@ -20160,9 +20499,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter698)
+          foreach ($this->group_names as $iter712)
           {
-            $xfer += $output->writeString($iter698);
+            $xfer += $output->writeString($iter712);
           }
         }
         $output->writeListEnd();
@@ -20753,15 +21092,15 @@ class ThriftHiveMetastore_get_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size699 = 0;
-            $_etype702 = 0;
-            $xfer += $input->readListBegin($_etype702, $_size699);
-            for ($_i703 = 0; $_i703 < $_size699; ++$_i703)
+            $_size713 = 0;
+            $_etype716 = 0;
+            $xfer += $input->readListBegin($_etype716, $_size713);
+            for ($_i717 = 0; $_i717 < $_size713; ++$_i717)
             {
-              $elem704 = null;
-              $elem704 = new \metastore\Partition();
-              $xfer += $elem704->read($input);
-              $this->success []= $elem704;
+              $elem718 = null;
+              $elem718 = new \metastore\Partition();
+              $xfer += $elem718->read($input);
+              $this->success []= $elem718;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20805,9 +21144,9 @@ class ThriftHiveMetastore_get_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter705)
+          foreach ($this->success as $iter719)
           {
-            $xfer += $iter705->write($output);
+            $xfer += $iter719->write($output);
           }
         }
         $output->writeListEnd();
@@ -20953,14 +21292,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size706 = 0;
-            $_etype709 = 0;
-            $xfer += $input->readListBegin($_etype709, $_size706);
-            for ($_i710 = 0; $_i710 < $_size706; ++$_i710)
+            $_size720 = 0;
+            $_etype723 = 0;
+            $xfer += $input->readListBegin($_etype723, $_size720);
+            for ($_i724 = 0; $_i724 < $_size720; ++$_i724)
             {
-              $elem711 = null;
-              $xfer += $input->readString($elem711);
-              $this->group_names []= $elem711;
+              $elem725 = null;
+              $xfer += $input->readString($elem725);
+              $this->group_names []= $elem725;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21008,9 +21347,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter712)
+          foreach ($this->group_names as $iter726)
           {
-            $xfer += $output->writeString($iter712);
+            $xfer += $output->writeString($iter726);
           }
         }
         $output->writeListEnd();
@@ -21099,15 +21438,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size713 = 0;
-            $_etype716 = 0;
-            $xfer += $input->readListBegin($_etype716, $_size713);
-            for ($_i717 = 0; $_i717 < $_size713; ++$_i717)
+            $_size727 = 0;
+            $_etype730 = 0;
+            $xfer += $input->readListBegin($_etype730, $_size727);
+            for ($_i731 = 0; $_i731 < $_size727; ++$_i731)
             {
-              $elem718 = null;
-              $elem718 = new \metastore\Partition();
-              $xfer += $elem718->read($input);
-              $this->success []= $elem718;
+              $elem732 = null;
+              $elem732 = new \metastore\Partition();
+              $xfer += $elem732->read($input);
+              $this->success []= $elem732;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21151,9 +21490,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter719)
+          foreach ($this->success as $iter733)
           {
-            $xfer += $iter719->write($output);
+            $xfer += $iter733->write($output);
           }
         }
         $output->writeListEnd();
@@ -21373,15 +21712,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size720 = 0;
-            $_etype723 = 0;
-            $xfer += $input->readListBegin($_etype723, $_size720);
-            for ($_i724 = 0; $_i724 < $_size720; ++$_i724)
+            $_size734 = 0;
+            $_etype737 = 0;
+            $xfer += $input->readListBegin($_etype737, $_size734);
+            for ($_i738 = 0; $_i738 < $_size734; ++$_i738)
             {
-              $elem725 = null;
-              $elem725 = new \metastore\PartitionSpec();
-              $xfer += $elem725->read($input);
-              $this->success []= $elem725;
+              $elem739 = null;
+              $elem739 = new \metastore\PartitionSpec();
+              $xfer += $elem739->read($input);
+              $this->success []= $elem739;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21425,9 +21764,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter726)
+          foreach ($this->success as $iter740)
           {
-            $xfer += $iter726->write($output);
+            $xfer += $iter740->write($output);
           }
         }
         $output->writeListEnd();
@@ -21634,14 +21973,14 @@ class ThriftHiveMetastore_get_partition_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size727 = 0;
-            $_etype730 = 0;
-            $xfer += $input->readListBegin($_etype730, $_size727);
-            for ($_i731 = 0; $_i731 < $_size727; ++$_i731)
+            $_size741 = 0;
+            $_etype744 = 0;
+            $xfer += $input->readListBegin($_etype744, $_size741);
+            for ($_i745 = 0; $_i745 < $_size741; ++$_i745)
             {
-              $elem732 = null;
-              $xfer += $input->readString($elem732);
-              $this->success []= $elem732;
+              $elem746 = null;
+              $xfer += $input->readString($elem746);
+              $this->success []= $elem746;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21677,9 +22016,9 @@ class ThriftHiveMetastore_get_partition_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter733)
+          foreach ($this->success as $iter747)
           {
-            $xfer += $output->writeString($iter733);
+            $xfer += $output->writeString($iter747);
           }
         }
         $output->writeListEnd();
@@ -21795,14 +22134,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size734 = 0;
-            $_etype737 = 0;
-            $xfer += $input->readListBegin($_etype737, $_size734);
-            for ($_i738 = 0; $_i738 < $_size734; ++$_i738)
+            $_size748 = 0;
+            $_etype751 = 0;
+            $xfer += $input->readListBegin($_etype751, $_size748);
+            for ($_i752 = 0; $_i752 < $_size748; ++$_i752)
             {
-              $elem739 = null;
-              $xfer += $input->readString($elem739);
-              $this->part_vals []= $elem739;
+              $elem753 = null;
+              $xfer += $input->readString($elem753);
+              $this->part_vals []= $elem753;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21847,9 +22186,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter740)
+          foreach ($this->part_vals as $iter754)
           {
-            $xfer += $output->writeString($iter740);
+            $xfer += $output->writeString($iter754);
           }
         }
         $output->writeListEnd();
@@ -21943,15 +22282,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size741 = 0;
-            $_etype744 = 0;
-            $xfer += $input->readListBegin($_etype744, $_size741);
-            for ($_i745 = 0; $_i745 < $_size741; ++$_i745)
+            $_size755 = 0;
+            $_etype758 = 0;
+            $xfer += $input->readListBegin($_etype758, $_size755);
+            for ($_i759 = 0; $_i759 < $_size755; ++$_i759)
             {
-              $elem746 = null;
-              $elem746 = new \metastore\Partition();
-              $xfer += $elem746->read($input);
-              $this->success []= $elem746;
+              $elem760 = null;
+              $elem760 = new \metastore\Partition();
+              $xfer += $elem760->read($input);
+              $this->success []= $elem760;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21995,9 +22334,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter747)
+          foreach ($this->success as $iter761)
           {
-            $xfer += $iter747->write($output);
+            $xfer += $iter761->write($output);
           }
         }
         $output->writeListEnd();
@@ -22144,14 +22483,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size748 = 0;
-            $_etype751 = 0;
-            $xfer += $input->readListBegin($_etype751, $_size748);
-            for ($_i752 = 0; $_i752 < $_size748; ++$_i752)
+            $_size762 = 0;
+            $_etype765 = 0;
+            $xfer += $input->readListBegin($_etype765, $_size762);
+            for ($_i766 = 0; $_i766 < $_size762; ++$_i766)
             {
-              $elem753 = null;
-              $xfer += $input->readString($elem753);
-              $this->part_vals []= $elem753;
+              $elem767 = null;
+              $xfer += $input->readString($elem767);
+              $this->part_vals []= $elem767;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22175,14 +22514,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 6:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size754 = 0;
-            $_etype757 = 0;
-            $xfer += $input->readListBegin($_etype757, $_size754);
-            for ($_i758 = 0; $_i758 < $_size754; ++$_i758)
+            $_size768 = 0;
+            $_etype771 = 0;
+            $xfer += $input->readListBegin($_etype771, $_size768);
+            for ($_i772 = 0; $_i772 < $_size768; ++$_i772)
             {
-              $elem759 = null;
-              $xfer += $input->readString($elem759);
-              $this->group_names []= $elem759;
+              $elem773 = null;
+              $xfer += $input->readString($elem773);
+              $this->group_names []= $elem773;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22220,9 +22559,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter760)
+          foreach ($this->part_vals as $iter774)
           {
-            $xfer += $output->writeString($iter760);
+            $xfer += $output->writeString($iter774);
           }
         }
         $output->writeListEnd();
@@ -22247,9 +22586,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter761)
+          foreach ($this->group_names as $iter775)
           {
-            $xfer += $output->writeString($iter761);
+            $xfer += $output->writeString($iter775);
           }
         }
         $output->writeListEnd();
@@ -22338,15 +22677,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size762 = 0;
-            $_etype765 = 0;
-            $xfer += $input->readListBegin($_etype765, $_size762);
-            for ($_i766 = 0; $_i766 < $_size762; ++$_i766)
+            $_size776 = 0;
+            $_etype779 = 0;
+            $xfer += $input->readListBegin($_etype779, $_size776);
+            for ($_i780 = 0; $_i780 < $_size776; ++$_i780)
             {
-              $elem767 = null;
-              $elem767 = new \metastore\Partition();
-              $xfer += $elem767->read($input);
-              $this->success []= $elem767;
+              $elem781 = null;
+              $elem781 = new \metastore\Partition();
+              $xfer += $elem781->read($input);
+              $this->success []= $elem781;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22390,9 +22729,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter768)
+          foreach ($this->success as $iter782)
           {
-            $xfer += $iter768->write($output);
+            $xfer += $iter782->write($output);
           }
         }
         $output->writeListEnd();
@@ -22513,14 +22852,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size769 = 0;
-            $_etype772 = 0;
-            $xfer += $input->readListBegin($_etype772, $_size769);
-            for ($_i773 = 0; $_i773 < $_size769; ++$_i773)
+            $_size783 = 0;
+            $_etype786 = 0;
+            $xfer += $input->readListBegin($_etype786, $_size783);
+            for ($_i787 = 0; $_i787 < $_size783; ++$_i787)
             {
-              $elem774 = null;
-              $xfer += $input->readString($elem774);
-              $this->part_vals []= $elem774;
+              $elem788 = null;
+              $xfer += $input->readString($elem788);
+              $this->part_vals []= $elem788;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22565,9 +22904,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter775)
+          foreach ($this->part_vals as $iter789)
           {
-            $xfer += $output->writeString($iter775);
+            $xfer += $output->writeString($iter789);
           }
         }
         $output->writeListEnd();
@@ -22660,14 +22999,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size776 = 0;
-            $_etype779 = 0;
-            $xfer += $input->readListBegin($_etype779, $_size776);
-            for ($_i780 = 0; $_i780 < $_size776; ++$_i780)
+            $_size790 = 0;
+            $_etype793 = 0;
+            $xfer += $input->readListBegin($_etype793, $_size790);
+            for ($_i794 = 0; $_i794 < $_size790; ++$_i794)
             {
-              $elem781 = null;
-              $xfer += $input->readString($elem781);
-              $this->success []= $elem781;
+              $elem795 = null;
+              $xfer += $input->readString($elem795);
+              $this->success []= $elem795;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22711,9 +23050,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter782)
+          foreach ($this->success as $iter796)
           {
-            $xfer += $output->writeString($iter782);
+            $xfer += $output->writeString($iter796);
           }
         }
         $output->writeListEnd();
@@ -22956,15 +23295,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size783 = 0;
-            $_etype786 = 0;
-            $xfer += $input->readListBegin($_etype786, $_size783);
-            for ($_i787 = 0; $_i787 < $_size783; ++$_i787)
+            $_size797 = 0;
+            $_etype800 = 0;
+            $xfer += $input->readListBegin($_etype800, $_size797);
+            for ($_i801 = 0; $_i801 < $_size797; ++$_i801)
             {
-              $elem788 = null;
-              $elem788 = new \metastore\Partition();
-              $xfer += $elem788->read($input);
-              $this->success []= $elem788;
+              $elem802 = null;
+              $elem802 = new \metastore\Partition();
+              $xfer += $elem802->read($input);
+              $this->success []= $elem802;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23008,9 +23347,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter789)
+          foreach ($this->success as $iter803)
           {
-            $xfer += $iter789->write($output);
+            $xfer += $iter803->write($output);
           }
         }
         $output->writeListEnd();
@@ -23253,15 +23592,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size790 = 0;
-            $_etype793 = 0;
-            $xfer += $input->readListBegin($_etype793, $_size790);
-            for ($_i794 = 0; $_i794 < $_size790; ++$_i794)
+            $_size804 = 0;
+            $_etype807 = 0;
+            $xfer += $input->readListBegin($_etype807, $_size804);
+            for ($_i808 = 0; $_i808 < $_size804; ++$_i808)
             {
-              $elem795 = null;
-              $elem795 = new \metastore\PartitionSpec();
-              $xfer += $elem795->read($input);
-              $this->success []= $elem795;
+              $elem809 = null;
+              $elem809 = new \metastore\PartitionSpec();
+              $xfer += $elem809->read($input);
+              $this->success []= $elem809;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23305,9 +23644,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter796)
+          foreach ($this->success as $iter810)
           {
-            $xfer += $iter796->write($output);
+            $xfer += $iter810->write($output);
           }
         }
         $output->writeListEnd();
@@ -23627,14 +23966,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->names = array();
-            $_size797 = 0;
-            $_etype800 = 0;
-            $xfer += $input->readListBegin($_etype800, $_size797);
-            for ($_i801 = 0; $_i801 < $_size797; ++$_i801)
+            $_size811 = 0;
+            $_etype814 = 0;
+            $xfer += $input->readListBegin($_etype814, $_size811);
+            for ($_i815 = 0; $_i815 < $_size811; ++$_i815)
             {
-              $elem802 = null;
-              $xfer += $input->readString($elem802);
-              $this->names []= $elem802;
+              $elem816 = null;
+              $xfer += $input->readString($elem816);
+              $this->names []= $elem816;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23672,9 +24011,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
       {
         $output->writeListBegin(TType::STRING, count($this->names));
         {
-          foreach ($this->names as $iter803)
+          foreach ($this->names as $iter817)
           {
-            $xfer += $output->writeString($iter803);
+            $xfer += $output->writeString($iter817);
           }
         }
         $output->writeListEnd();
@@ -23763,15 +24102,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size804 = 0;
-            $_etype807 = 0;
-            $xfer += $input->readListBegin($_etype807, $_size804);
-            for ($_i808 = 0; $_i808 < $_size804; ++$_i808)
+            $_size818 = 0;
+            $_etype821 = 0;
+            $xfer += $input->readListBegin($_etype821, $_size818);
+            for ($_i822 = 0; $_i822 < $_size818; ++$_i822)
             {
-              $elem809 = null;
-              $elem809 = new \metastore\Partition();
-              $xfer += $elem809->read($input);
-              $this->success []= $elem809;
+              $elem823 = null;
+              $elem823 = new \metastore\Partition();
+              $xfer += $elem823->read($input);
+              $this->success []= $elem823;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23815,9 +24154,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter810)
+          foreach ($this->success as $iter824)
           {
-            $xfer += $iter810->write($output);
+            $xfer += $iter824->write($output);
           }
         }
         $output->writeListEnd();
@@ -24156,15 +24495,15 @@ class ThriftHiveMetastore_alter_partitions_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size811 = 0;
-            $_etype814 = 0;
-            $xfer += $input->readListBegin($_etype814, $_size811);
-            for ($_i815 = 0; $_i815 < $_size811; ++$_i815)
+            $_size825 = 0;
+            $_etype828 = 0;
+            $xfer += $input->readListBegin($_etype828, $_size825);
+            for ($_i829 = 0; $_i829 < $_size825; ++$_i829)
             {
-              $elem816 = null;
-              $elem816 = new \metastore\Partition();
-              $xfer += $elem816->read($input);
-              $this->new_parts []= $elem816;
+              $elem830 = null;
+              $elem830 = new \metastore\Partition();
+              $xfer += $elem830->read($input);
+              $this->new_parts []= $elem830;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24202,9 +24541,9 @@ class ThriftHiveMetastore_alter_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter817)
+          foreach ($this->new_parts as $iter831)
           {
-            $xfer += $iter817->write($output);
+            $xfer += $iter831->write($output);
           }
         }
         $output->writeListEnd();
@@ -24674,14 +25013,14 @@ class ThriftHiveMetastore_rename_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size818 = 0;
-            $_etype821 = 0;
-            $xfer += $input->readListBegin($_etype821, $_size818);
-            for ($_i822 = 0; $_i822 < $_size818; ++$_i822)
+            $_size832 = 0;
+            $_etype835 = 0;
+            $xfer += $input->readListBegin($_etype835, $_size832);
+            for ($_i836 = 0; $_i836 < $_size832; ++$_i836)
             {
-              $elem823 = null;
-              $xfer += $input->readString($elem823);
-              $this->part_vals []= $elem823;
+              $elem837 = null;
+              $xfer += $input->readString($elem837);
+              $this->part_vals []= $elem837;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24727,9 +25066,9 @@ class ThriftHiveMetastore_rename_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter824)
+          foreach ($this->part_vals as $iter838)
           {
-            $xfer += $output->writeString($iter824);
+            $xfer += $output->writeString($iter838);
           }
         }
         $output->writeListEnd();
@@ -24914,14 +25253,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size825 = 0;
-            $_etype828 = 0;
-            $xfer += $input->readListBegin($_etype828, $_size825);
-            for ($_i829 = 0; $_i829 < $_size825; ++$_i829)
+            $_size839 = 0;
+            $_etype842 = 0;
+            $xfer += $input->readListBegin($_etype842, $_size839);
+            for ($_i843 = 0; $_i843 < $_size839; ++$_i843)
             {
-              $elem830 = null;
-              $xfer += $input->readString($elem830);
-              $this->part_vals []= $elem830;
+              $elem844 = null;
+              $xfer += $input->readString($elem844);
+              $this->part_vals []= $elem844;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24956,9 +25295,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter831)
+          foreach ($this->part_vals as $iter845)
           {
-            $xfer += $output->writeString($iter831);
+            $xfer += $output->writeString($iter845);
           }
         }
         $output->writeListEnd();
@@ -25412,14 +25751,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size832 = 0;
-            $_etype835 = 0;
-            $xfer += $input->readListBegin($_etype835, $_size832);
-            for ($_i836 = 0; $_i836 < $_size832; ++$_i836)
+            $_size846 = 0;
+            $_etype849 = 0;
+            $xfer += $input->readListBegin($_etype849, $_size846);
+            for ($_i850 = 0; $_i850 < $_size846; ++$_i850)
             {
-              $elem837 = null;
-              $xfer += $input->readString($elem837);
-              $this->success []= $elem837;
+              $elem851 = null;
+              $xfer += $input->readString($elem851);
+              $this->success []= $elem851;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25455,9 +25794,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter838)
+          foreach ($this->success as $iter852)
           {
-            $xfer += $output->writeString($iter838);
+            $xfer += $output->writeString($iter852);
           }
         }
         $output->writeListEnd();
@@ -25617,17 +25956,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size839 = 0;
-            $_ktype840 = 0;
-            $_vtype841 = 0;
-            $xfer += $input->readMapBegin($_ktype840, $_vtype841, $_size839);
-            for ($_i843 = 0; $_i843 < $_size839; ++$_i843)
+            $_size853 = 0;
+            $_ktype854 = 0;
+            $_vtype855 = 0;
+            $xfer += $input->readMapBegin($_ktype854, $_vtype855, $_size853);
+            for ($_i857 = 0; $_i857 < $_size853; ++$_i857)
             {
-              $key844 = '';
-              $val845 = '';
-              $xfer += $input->readString($key844);
-              $xfer += $input->readString($val845);
-              $this->success[$key844] = $val845;
+              $key858 = '';
+              $val859 = '';
+              $xfer += $input->readString($key858);
+              $xfer += $input->readString($val859);
+              $this->success[$key858] = $val859;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -25663,10 +26002,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
         {
-          foreach ($this->success as $kiter846 => $viter847)
+          foreach ($this->success as $kiter860 => $viter861)
           {
-            $xfer += $output->writeString($kiter846);
-            $xfer += $output->writeString($viter847);
+            $xfer += $output->writeString($kiter860);
+            $xfer += $output->writeString($viter861);
           }
         }
         $output->writeMapEnd();
@@ -25786,17 +26125,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size848 = 0;
-            $_ktype849 = 0;
-            $_vtype850 = 0;
-            $xfer += $input->readMapBegin($_ktype849, $_vtype850, $_size848);
-            for ($_i852 = 0; $_i852 < $_size848; ++$_i852)
+            $_size862 = 0;
+            $_ktype863 = 0;
+            $_vtype864 = 0;
+            $xfer += $input->readMapBegin($_ktype863, $_vtype864, $_size862);
+            for ($_i866 = 0; $_i866 < $_size862; ++$_i866)
             {
-              $key853 = '';
-              $val854 = '';
-              $xfer += $input->readString($key853);
-              $xfer += $input->readString($val854);
-              $this->part_vals[$key853] = $val854;
+              $key867 = '';
+              $val868 = '';
+              $xfer += $input->readString($key867);
+              $xfer += $input->readString($val868);
+              $this->part_vals[$key867] = $val868;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -25841,10 +26180,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter855 => $viter856)
+          foreach ($this->part_vals as $kiter869 => $viter870)
           {
-            $xfer += $output->writeString($kiter855);
-            $xfer += $output->writeString($viter856);
+            $xfer += $output->writeString($kiter869);
+            $xfer += $output->writeString($viter870);
           }
         }
         $output->writeMapEnd();
@@ -26166,17 +26505,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size857 = 0;
-            $_ktype858 = 0;
-            $_vtype859 = 0;
-            $xfer += $input->readMapBegin($_ktype858, $_vtype859, $_size857);
-            for ($_i861 = 0; $_i861 < $_size857; ++$_i861)
+            $_size871 = 0;
+            $_ktype872 = 0;
+            $_vtype873 = 0;
+            $xfer += $input->readMapBegin($_ktype872, $_vtype873, $_size871);
+            for ($_i875 = 0; $_i875 < $_size871; ++$_i875)
             {
-              $key862 = '';
-              $val863 = '';
-              $xfer += $input->readString($key862);
-              $xfer += $input->readString($val863);
-              $this->part_vals[$key862] = $val863;
+              $key876 = '';
+              $val877 = '';
+              $xfer += $input->readString($key876);
+              $xfer += $input->readString($val877);
+              $this->part_vals[$key876] = $val877;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -26221,10 +26560,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter864 => $viter865)
+          foreach ($this->part_vals as $kiter878 => $viter879)
           {
-            $xfer += $output->writeString($kiter864);
-            $xfer += $output->writeString($viter865);
+            $xfer += $output->writeString($kiter878);
+            $xfer += $output->writeString($viter879);
           }
         }
         $output->writeMapEnd();
@@ -27698,15 +28037,15 @@ class ThriftHiveMetastore_get_indexes_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size866 = 0;
-            $_etype869 = 0;
-            $xfer += $input->readListBegin($_etype869, $_size866);
-            for ($_i870 = 0; $_i870 < $_size866; ++$_i870)
+            $_size880 = 0;
+            $_etype883 = 0;
+            $xfer += $input->readListBegin($_etype883, $_size880);
+            for ($_i884 = 0; $_i884 < $_size880; ++$_i884)
             {
-              $elem871 = null;
-              $elem871 = new \metastore\Index();
-              $xfer += $elem871->read($input);
-              $this->success []= $elem871;
+              $elem885 = null;
+              $elem885 = new \metastore\Index();
+              $xfer += $elem885->read($input);
+              $this->success []= $elem885;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27750,9 +28089,9 @@ class ThriftHiveMetastore_get_indexes_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter872)
+          foreach ($this->success as $iter886)
           {
-            $xfer += $iter872->write($output);
+            $xfer += $iter886->write($output);
           }
         }
         $output->writeListEnd();
@@ -27959,14 +28298,14 @@ class ThriftHiveMetastore_get_index_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size873 = 0;
-            $_etype876 = 0;
-            $xfer += $input->readListBegin($_etype876, $_size873);
-            for ($_i877 = 0; $_i877 < $_size873; ++$_i877)
+            $_size887 = 0;
+            $_etype890 = 0;
+            $xfer += $input->readListBegin($_etype890, $_size887);
+            for ($_i891 = 0; $_i891 < $_size887; ++$_i891)
             {
-              $elem878 = null;
-              $xfer += $input->readString($elem878);
-              $this->success []= $elem878;
+              $elem892 = null;
+              $xfer += $input->readString($elem892);
+              $this->success []= $elem892;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28002,9 +28341,9 @@ class ThriftHiveMetastore_get_index_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter879)
+          foreach ($this->success as $iter893)
           {
-            $xfer += $output->writeString($iter879);
+            $xfer += $output->writeString($iter893);
           }
         }
         $output->writeListEnd();
@@ -31478,14 +31817,14 @@ class ThriftHiveMetastore_get_functions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size880 = 0;
-            $_etype883 = 0;
-            $xfer += $input->readListBegin($_etype883, $_size880);
-            for ($_i884 = 0; $_i884 < $_size880; ++$_i884)
+            $_size894 = 0;
+            $_etype897 = 0;
+            $xfer += $input->readListBegin($_etype897, $_size894);
+            for ($_i898 = 0; $_i898 < $_size894; ++$_i898)
             {
-              $elem885 = null;
-              $xfer += $input->readString($elem885);
-              $this->success []= $elem885;
+              $elem899 = null;
+              $xfer += $input->readString($elem899);
+              $this->success []= $elem899;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31521,9 +31860,9 @@ class ThriftHiveMetastore_get_functions_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter886)
+          foreach ($this->success as $iter900)
           {
-            $xfer += $output->writeString($iter886);
+            $xfer += $output->writeString($iter900);
           }
         }
         $output->writeListEnd();
@@ -32392,14 +32731,14 @@ class ThriftHiveMetastore_get_role_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size887 = 0;
-            $_etype890 = 0;
-            $xfer += $input->readListBegin($_etype890, $_size887);
-            for ($_i891 = 0; $_i891 < $_size887; ++$_i891)
+            $_size901 = 0;
+            $_etype904 = 0;
+            $xfer += $input->readListBegin($_etype904, $_size901);
+            for ($_i905 = 0; $_i905 < $_size901; ++$_i905)
             {
-              $elem892 = null;
-              $xfer += $input->readString($elem892);
-              $this->success []= $elem892;
+              $elem906 = null;
+              $xfer += $input->readString($elem906);
+              $this->success []= $elem906;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32435,9 +32774,9 @@ class ThriftHiveMetastore_get_role_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter893)
+          foreach ($this->success as $iter907)
           {
-            $xfer += $output->writeString($iter893);
+            $xfer += $output->writeString($iter907);
           }
         }
         $output->writeListEnd();
@@ -33128,15 +33467,15 @@ class ThriftHiveMetastore_list_roles_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size894 = 0;
-            $_etype897 = 0;
-            $xfer += $input->readListBegin($_etype897, $_size894);
-            for ($_i898 = 0; $_i898 < $_size894; ++$_i898)
+            $_size908 = 0;
+            $_etype911 = 0;
+            $xfer += $input->readListBegin($_etype911, $_size908);
+            for ($_i912 = 0; $_i912 < $_size908; ++$_i912)
             {
-              $elem899 = null;
-              $elem899 = new \metastore\Role();
-              $xfer += $elem899->read($input);
-              $this->success []= $elem899;
+              $elem913 = null;
+              $elem913 = new \metastore\Role();
+              $xfer += $elem913->read($input);
+              $this->success []= $elem913;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33172,9 +33511,9 @@ class ThriftHiveMetastore_list_roles_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter900)
+          foreach ($this->success as $iter914)
           {
-            $xfer += $iter900->write($output);
+            $xfer += $iter914->write($output);
           }
         }
         $output->writeListEnd();
@@ -33836,14 +34175,14 @@ class ThriftHiveMetastore_get_privilege_set_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size901 = 0;
-            $_etype904 = 0;
-            $xfer += $input->readListBegin($_etype904, $_size901);
-            for ($_i905 = 0; $_i905 < $_size901; ++$_i905)
+            $_size915 = 0;
+            $_etype918 = 0;
+            $xfer += $input->readListBegin($_etype918, $_size915);
+            for ($_i919 = 0; $_i919 < $_size915; ++$_i919)
             {
-              $elem906 = null;
-              $xfer += $input->readString($elem906);
-              $this->group_names []= $elem906;
+              $elem920 = null;
+              $xfer += $input->readString($elem920);
+              $this->group_names []= $elem920;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33884,9 +34223,9 @@ class ThriftHiveMetastore_get_privilege_set_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter907)
+          foreach ($this->group_names as $iter921)
           {
-            $xfer += $output->writeString($iter907);
+            $xfer += $output->writeString($iter921);
           }
         }
         $output->writeListEnd();
@@ -34194,15 +34533,15 @@ class ThriftHiveMetastore_list_privileges_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size908 = 0;
-            $_etype911 = 0;
-            $xfer += $input->readListBegin($_etype911, $_size908);
-            for ($_i912 = 0; $_i912 < $_size908; ++$_i912)
+            $_size922 = 0;
+            $_etype925 = 0;
+            $xfer += $input->readListBegin($_etype925, $_size922);
+            for ($_i926 = 0; $_i926 < $_size922; ++$_i926)
             {
-              $elem913 = null;
-              $elem913 = new \metastore\HiveObjectPrivilege();
-              $xfer += $elem913->read($input);
-              $this->success []= $elem913;
+              $elem927 = null;
+              $elem927 = new \metastore\HiveObjectPrivilege();
+              $xfer += $elem927->read($input);
+              $this->success []= $elem927;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -34238,9 +34577,9 @@ class ThriftHiveMetastore_list_privileges_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter914)
+          foreach ($this->success as $iter928)
           {
-            $xfer += $iter914->write($output);
+            $xfer += $iter928->write($output);
           }
         }
         $output->writeListEnd();
@@ -34872,14 +35211,14 @@ class ThriftHiveMetastore_set_ugi_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size915 = 0;
-            $_etype918 = 0;
-            $xfer += $input->readListBegin($_etype918, $_size915);
-            for ($_i919 = 0; $_i919 < $_size915; ++$_i919)
+            $_size929 = 0;
+            $_etype932 = 0;
+            $xfer += $input->readListBegin($_etype932, $_size929);
+            for ($_i933 = 0; $_i933 < $_size929; ++$_i933)
             {
-              $elem920 = null;
-              $xfer += $input->readString($elem920);
-              $this->group_names []= $elem920;
+              $elem934 = null;
+              $xfer += $input->readString($elem934);
+              $this->group_names []= $elem934;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -34912,9 +35251,9 @@ class ThriftHiveMetastore_set_ugi_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter921)
+          foreach ($this->group_names as $iter935)
           {
-            $xfer += $output->writeString($iter921);
+            $xfer += $output->writeString($iter935);
           }
         }
         $output->writeListEnd();
@@ -34990,14 +35329,14 @@ class ThriftHiveMetastore_set_ugi_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size922 = 0;
-            $_etype925 = 0;
-            $xfer += $input->readListBegin($_etype925, $_size922);
-            for ($_i926 = 0; $_i926 < $_size922; ++$_i926)
+            $_size936 = 0;
+            $_etype939 = 0;
+            $xfer += $input->readListBegin($_etype939, $_size936);
+            for ($_i940 = 0; $_i940 < $_size936; ++$_i940)
             {
-              $elem927 = null;
-              $xfer += $input->readString($elem927);
-              $this->success []= $elem927;
+              $elem941 = null;
+              $xfer += $input->readString($elem941);
+              $this->success []= $elem941;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -35033,9 +35372,9 @@ class ThriftHiveMetastore_set_ugi_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter928)
+          foreach ($this->success as $iter942)
           {
-            $xfer += $output->writeString($iter928);
+            $xfer += $output->writeString($iter942);
           }
         }
         $output->writeListEnd();

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index e63213d..e43a13d 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -14957,6 +14957,150 @@ class GetAllFunctionsResponse {
 
 }
 
+class TableMeta {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $dbName = null;
+  /**
+   * @var string
+   */
+  public $tableName = null;
+  /**
+   * @var string
+   */
+  public $tableType = null;
+  /**
+   * @var string
+   */
+  public $comments = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'dbName',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'tableName',
+          'type' => TType::STRING,
+          ),
+        3 => array(
+          'var' => 'tableType',
+          'type' => TType::STRING,
+          ),
+        4 => array(
+          'var' => 'comments',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['dbName'])) {
+        $this->dbName = $vals['dbName'];
+      }
+      if (isset($vals['tableName'])) {
+        $this->tableName = $vals['tableName'];
+      }
+      if (isset($vals['tableType'])) {
+        $this->tableType = $vals['tableType'];
+      }
+      if (isset($vals['comments'])) {
+        $this->comments = $vals['comments'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'TableMeta';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->dbName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->tableName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->tableType);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->comments);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('TableMeta');
+    if ($this->dbName !== null) {
+      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
+      $xfer += $output->writeString($this->dbName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->tableName !== null) {
+      $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2);
+      $xfer += $output->writeString($this->tableName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->tableType !== null) {
+      $xfer += $output->writeFieldBegin('tableType', TType::STRING, 3);
+      $xfer += $output->writeString($this->tableType);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->comments !== null) {
+      $xfer += $output->writeFieldBegin('comments', TType::STRING, 4);
+      $xfer += $output->writeString($this->comments);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class MetaException extends TException {
   static $_TSPEC;
 

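The struct above is the PHP side of the new lightweight table-metadata record; the same IDL also produces Java bindings. A minimal sketch of populating and reading the Java TableMeta bean — the setter/getter names assume the standard Thrift-generated accessors and the values are illustrative, not an excerpt from this commit:

import org.apache.hadoop.hive.metastore.api.TableMeta;

public class TableMetaExample {
  public static void main(String[] args) {
    // One row of lightweight table metadata: db, table, type, optional comment.
    TableMeta meta = new TableMeta();
    meta.setDbName("default");                    // field 1
    meta.setTableName("web_logs");                // field 2 (hypothetical table)
    meta.setTableType("MANAGED_TABLE");           // field 3
    meta.setComments("daily click-stream data");  // field 4

    System.out.println(meta.getDbName() + "." + meta.getTableName()
        + " (" + meta.getTableType() + ")");
  }
}
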
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 65ba10e..22d794f 100755
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -45,6 +45,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  void drop_table(string dbname, string name, bool deleteData)')
   print('  void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)')
   print('   get_tables(string db_name, string pattern)')
+  print('   get_table_meta(string db_patterns, string tbl_patterns,  tbl_types)')
   print('   get_all_tables(string db_name)')
   print('  Table get_table(string dbname, string tbl_name)')
   print('   get_table_objects_by_name(string dbname,  tbl_names)')
@@ -349,6 +350,12 @@ elif cmd == 'get_tables':
     sys.exit(1)
   pp.pprint(client.get_tables(args[0],args[1],))
 
+elif cmd == 'get_table_meta':
+  if len(args) != 3:
+    print('get_table_meta requires 3 args')
+    sys.exit(1)
+  pp.pprint(client.get_table_meta(args[0],args[1],eval(args[2]),))
+
 elif cmd == 'get_all_tables':
   if len(args) != 1:
     print('get_all_tables requires 1 args')

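For completeness, the same RPC through the Thrift-generated Java client; the method name and argument order follow the IDL surfaced by the CLI hook above (db_patterns, tbl_patterns, tbl_types), while the connection boilerplate and the patterns passed in are illustrative:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.TableMeta;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class GetTableMetaExample {
  public static void main(String[] args) throws Exception {
    TTransport transport = new TSocket("localhost", 9083);  // default metastore port
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // get_table_meta(string db_patterns, string tbl_patterns, list<string> tbl_types)
    List<TableMeta> metas =
        client.get_table_meta("default", "*", Arrays.asList("MANAGED_TABLE"));
    for (TableMeta m : metas) {
      System.out.println(m.getDbName() + "." + m.getTableName() + " : " + m.getTableType());
    }
    transport.close();
  }
}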

[29/55] [abbrv] hive git commit: HIVE-12310: Update memory estimation logic in TopNHash (Hari Subramaniyan, reviewed by Thejas Nair)

Posted by xu...@apache.org.
HIVE-12310: Update memory estimation logic in TopNHash (Hari Subramaniyan, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/678b77b5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/678b77b5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/678b77b5

Branch: refs/heads/spark
Commit: 678b77b5be2e97581a49fdb47614274f1ea8a7a5
Parents: 6ba735f
Author: Hari Subramaniyan <ha...@apache.org>
Authored: Fri Nov 6 10:39:25 2015 -0800
Committer: Hari Subramaniyan <ha...@apache.org>
Committed: Fri Nov 6 10:39:25 2015 -0800

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/678b77b5/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
index 46b3510..e400368 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
@@ -103,8 +103,13 @@ public class TopNHash {
       return; // topN == 0 will cause a short-circuit, don't need any initialization
     }
 
+    // Used Memory = totalMemory() - freeMemory();
+    // Total Free Memory = maxMemory() - Used Memory;
+    long totalFreeMemory = Runtime.getRuntime().maxMemory() -
+      Runtime.getRuntime().totalMemory() + Runtime.getRuntime().freeMemory();
+
     // limit * 64 : compensation of arrays for key/value/hashcodes
-    this.threshold = (long) (memUsage * Runtime.getRuntime().freeMemory()) - topN * 64L;
+    this.threshold = (long) (memUsage * totalFreeMemory) - topN * 64L;
     if (threshold < 0) {
       return;
     }

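The point of the patch, restated: freeMemory() only reports free space inside the currently committed heap, so scaling memUsage by it undercounts on a JVM that has not yet grown to -Xmx. The new estimate also counts the uncommitted headroom (maxMemory() - totalMemory()). A standalone sketch of the arithmetic — memUsage and topN here are placeholders, not Hive's defaults:

public class FreeMemoryEstimate {
  public static void main(String[] args) {
    Runtime rt = Runtime.getRuntime();

    long used = rt.totalMemory() - rt.freeMemory();  // committed heap actually in use
    long totalFree = rt.maxMemory() - used;          // committed-but-free + not-yet-committed

    float memUsage = 0.1f;                           // placeholder fraction
    int topN = 100;                                  // placeholder limit
    // limit * 64 : compensation of arrays for key/value/hashcodes, as in the patch
    long threshold = (long) (memUsage * totalFree) - topN * 64L;

    System.out.println("totalFree=" + totalFree + " threshold=" + threshold);
  }
}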

[21/55] [abbrv] hive git commit: HIVE-12207 : Query fails when non-ascii characters are used in string literals (Aleksei Statkevich via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-12207 : Query fails when non-ascii characters are used in string literals (Aleksei Statkevich via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a8eb4aef
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a8eb4aef
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a8eb4aef

Branch: refs/heads/spark
Commit: a8eb4aef496568fccbde4898d42c2c14875f7c03
Parents: d06b69f
Author: Aleksei Statkevich <me...@gmail.com>
Authored: Sat Oct 17 23:37:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Nov 5 14:16:31 2015 -0800

----------------------------------------------------------------------
 .../calcite/translator/RexNodeConverter.java    | 13 ++++++++---
 .../queries/clientpositive/non_ascii_literal1.q |  1 +
 .../queries/clientpositive/non_ascii_literal2.q |  5 +++++
 .../clientpositive/non_ascii_literal1.q.out     |  9 ++++++++
 .../clientpositive/non_ascii_literal2.q.out     | 23 ++++++++++++++++++++
 5 files changed, 48 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a8eb4aef/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index d315497..631a4ca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -38,11 +38,14 @@ import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.sql.SqlCollation;
 import org.apache.calcite.sql.SqlIntervalQualifier;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlCastFunction;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.ConversionUtil;
+import org.apache.calcite.util.NlsString;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.type.Decimal128;
@@ -301,6 +304,10 @@ public class RexNodeConverter {
   private static final BigInteger MIN_LONG_BI = BigInteger.valueOf(Long.MIN_VALUE),
       MAX_LONG_BI = BigInteger.valueOf(Long.MAX_VALUE);
 
+  private static NlsString asUnicodeString(String text) {
+    return new NlsString(text, ConversionUtil.NATIVE_UTF16_CHARSET_NAME, SqlCollation.IMPLICIT);
+  }
+
   protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticException {
     RexBuilder rexBuilder = cluster.getRexBuilder();
     RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
@@ -377,16 +384,16 @@ public class RexNodeConverter {
       if (value instanceof HiveChar) {
         value = ((HiveChar) value).getValue();
       }
-      calciteLiteral = rexBuilder.makeLiteral((String) value);
+      calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case VARCHAR:
       if (value instanceof HiveVarchar) {
         value = ((HiveVarchar) value).getValue();
       }
-      calciteLiteral = rexBuilder.makeLiteral((String) value);
+      calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case STRING:
-      calciteLiteral = rexBuilder.makeLiteral((String) value);
+      calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case DATE:
       Calendar cal = new GregorianCalendar();

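The crux of the change: rexBuilder.makeLiteral(String) builds the literal with Calcite's default charset (historically ISO-8859-1), which cannot carry the characters in the queries below; pinning the value to the native UTF-16 charset via NlsString preserves them. A minimal sketch of the helper in isolation, mirroring the method added above:

import org.apache.calcite.sql.SqlCollation;
import org.apache.calcite.util.ConversionUtil;
import org.apache.calcite.util.NlsString;

public class UnicodeLiteralExample {
  // Same shape as the asUnicodeString helper added in this commit.
  static NlsString asUnicodeString(String text) {
    return new NlsString(text, ConversionUtil.NATIVE_UTF16_CHARSET_NAME, SqlCollation.IMPLICIT);
  }

  public static void main(String[] args) {
    NlsString s = asUnicodeString("Абвгде谢谢");
    // The wrapped value round-trips intact, tagged with the UTF-16 charset name.
    System.out.println(s.getValue() + " [" + s.getCharsetName() + "]");
  }
}
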
http://git-wip-us.apache.org/repos/asf/hive/blob/a8eb4aef/ql/src/test/queries/clientpositive/non_ascii_literal1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/non_ascii_literal1.q b/ql/src/test/queries/clientpositive/non_ascii_literal1.q
new file mode 100644
index 0000000..9573653
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/non_ascii_literal1.q
@@ -0,0 +1 @@
+select concat("Абвгде", "谢谢") from src limit 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/a8eb4aef/ql/src/test/queries/clientpositive/non_ascii_literal2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/non_ascii_literal2.q b/ql/src/test/queries/clientpositive/non_ascii_literal2.q
new file mode 100644
index 0000000..6b25273
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/non_ascii_literal2.q
@@ -0,0 +1,5 @@
+create table non_ascii_literal2 as
+select "谢谢" as col1, "Абвгде" as col2;
+
+select * from non_ascii_literal2
+where col2 = "Абвгде";

http://git-wip-us.apache.org/repos/asf/hive/blob/a8eb4aef/ql/src/test/results/clientpositive/non_ascii_literal1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/non_ascii_literal1.q.out b/ql/src/test/results/clientpositive/non_ascii_literal1.q.out
new file mode 100644
index 0000000..5b28f4e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/non_ascii_literal1.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: select concat("Абвгде", "谢谢") from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select concat("Абвгде", "谢谢") from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+Абвгде谢谢

http://git-wip-us.apache.org/repos/asf/hive/blob/a8eb4aef/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/non_ascii_literal2.q.out b/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
new file mode 100644
index 0000000..7e19143
--- /dev/null
+++ b/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
@@ -0,0 +1,23 @@
+PREHOOK: query: create table non_ascii_literal2 as
+select "谢谢" as col1, "Абвгде" as col2
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: database:default
+PREHOOK: Output: default@non_ascii_literal2
+POSTHOOK: query: create table non_ascii_literal2 as
+select "谢谢" as col1, "Абвгде" as col2
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@non_ascii_literal2
+PREHOOK: query: select * from non_ascii_literal2
+where col2 = "Абвгде"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@non_ascii_literal2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from non_ascii_literal2
+where col2 = "Абвгде"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@non_ascii_literal2
+#### A masked pattern was here ####
+谢谢	Абвгде


[13/55] [abbrv] hive git commit: HIVE-12333: tez_union_with_udf.q added to wrong section in testconfiguration.properties (Jason Dere, reviewed by Chinna Lalam)

Posted by xu...@apache.org.
HIVE-12333: tez_union_with_udf.q added to wrong section in testconfiguration.properties (Jason Dere, reviewed by Chinna Lalam)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0e94a1d9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0e94a1d9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0e94a1d9

Branch: refs/heads/spark
Commit: 0e94a1d9dc1ddbf2e6292cc6e57e0fa2e83350d8
Parents: 9ba2cdf
Author: Jason Dere <jd...@hortonworks.com>
Authored: Wed Nov 4 17:14:34 2015 -0800
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Wed Nov 4 17:14:34 2015 -0800

----------------------------------------------------------------------
 itests/src/test/resources/testconfiguration.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0e94a1d9/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 2d1d274..d16c318 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -379,6 +379,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
   tez_union2.q,\
   tez_union_dynamic_partition.q,\
   tez_union_view.q,\
+  tez_union_with_udf.q,\
   tez_union_decimal.q,\
   tez_union_group_by.q,\
   tez_smb_main.q,\
@@ -424,7 +425,6 @@ minillap.query.files=bucket_map_join_tez1.q,\
   tez_union_view.q,\
   tez_union_decimal.q,\
   tez_union_group_by.q,\
-  tez_union_with_udf.q,\
   tez_smb_main.q,\
   tez_smb_1.q,\
   vectorized_dynamic_partition_pruning.q,\


[50/55] [abbrv] hive git commit: HIVE-12309 : TableScan should use colStats when available for better data size estimate (Ashutosh Chauhan via Prasanth J)

Posted by xu...@apache.org.
HIVE-12309 : TableScan should use colStats when available for better data size estimate (Ashutosh Chauhan via Prasanth J)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4f7f8820
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4f7f8820
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4f7f8820

Branch: refs/heads/spark
Commit: 4f7f882049e2dd3e055d359d057618e608098c61
Parents: b678ed8
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Fri Oct 30 15:03:42 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue Nov 10 14:56:51 2015 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |  71 ++--
 .../annotate_stats_deep_filters.q.out           |   2 +-
 .../clientpositive/annotate_stats_filter.q.out  |  48 +--
 .../clientpositive/annotate_stats_groupby.q.out |  56 +--
 .../annotate_stats_groupby2.q.out               |  32 +-
 .../clientpositive/annotate_stats_join.q.out    |  34 +-
 .../annotate_stats_join_pkfk.q.out              |  50 +--
 .../clientpositive/annotate_stats_limit.q.out   |   8 +-
 .../clientpositive/annotate_stats_part.q.out    |  14 +-
 .../clientpositive/annotate_stats_select.q.out  |  24 +-
 .../clientpositive/annotate_stats_table.q.out   |  12 +-
 .../clientpositive/annotate_stats_union.q.out   |  20 +-
 .../clientpositive/cbo_rp_auto_join0.q.out      |   8 +-
 .../clientpositive/cbo_rp_auto_join1.q.out      |  30 +-
 .../results/clientpositive/cbo_rp_join0.q.out   |  14 +-
 .../extrapolate_part_stats_full.q.out           |   8 +-
 .../extrapolate_part_stats_partial.q.out        |  12 +-
 .../extrapolate_part_stats_partial_ndv.q.out    |   6 +-
 .../clientpositive/llap/llapdecider.q.out       |  46 +--
 .../spark/annotate_stats_join.q.out             |  34 +-
 .../results/clientpositive/stats_ppr_all.q.out  |   8 +-
 .../clientpositive/tez/explainuser_1.q.out      | 352 +++++++++----------
 .../clientpositive/tez/llapdecider.q.out        |  46 +--
 23 files changed, 473 insertions(+), 462 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index e1f8ebc..71ed31c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -138,6 +138,39 @@ public class StatsUtils {
         fetchColStats, fetchPartStats);
   }
 
+  private static long getDataSize(HiveConf conf, Table table) {
+    long ds = getRawDataSize(table);
+    if (ds <= 0) {
+      ds = getTotalSize(table);
+
+      // if data size is still 0 then get file size
+      if (ds <= 0) {
+        ds = getFileSizeForTable(conf, table);
+      }
+      float deserFactor =
+          HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_STATS_DESERIALIZATION_FACTOR);
+      ds = (long) (ds * deserFactor);
+    }
+
+    return ds;
+  }
+
+  private static long getNumRows(HiveConf conf, List<ColumnInfo> schema, List<String> neededColumns, Table table, long ds) {
+    long nr = getNumRows(table);
+    // number of rows -1 means that the statistics from the metastore are not reliable
+    // and 0 means statistics gathering is disabled
+    if (nr <= 0) {
+      int avgRowSize = estimateRowSizeFromSchema(conf, schema, neededColumns);
+      if (avgRowSize > 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Estimated average row size: " + avgRowSize);
+        }
+        nr = ds / avgRowSize;
+      }
+    }
+    return nr == 0 ? 1 : nr;
+  }
+
   public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList,
       Table table, List<ColumnInfo> schema, List<String> neededColumns,
       List<String> referencedColumns, boolean fetchColStats, boolean fetchPartStats)
@@ -149,41 +182,17 @@ public class StatsUtils {
         HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_STATS_DESERIALIZATION_FACTOR);
 
     if (!table.isPartitioned()) {
-      long nr = getNumRows(table);
-      long ds = getRawDataSize(table);
-      if (ds <= 0) {
-        ds = getTotalSize(table);
 
-        // if data size is still 0 then get file size
-        if (ds <= 0) {
-          ds = getFileSizeForTable(conf, table);
-        }
-
-        ds = (long) (ds * deserFactor);
-      }
-
-      // number of rows -1 means that statistics from metastore is not reliable
-      // and 0 means statistics gathering is disabled
-      if (nr <= 0) {
-        int avgRowSize = estimateRowSizeFromSchema(conf, schema, neededColumns);
-        if (avgRowSize > 0) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Estimated average row size: " + avgRowSize);
-          }
-          nr = ds / avgRowSize;
-        }
-      }
-      if (nr == 0) {
-        nr = 1;
-      }
+      long ds = getDataSize(conf, table);
+      long nr = getNumRows(conf, schema, neededColumns, table, ds);
       stats.setNumRows(nr);
-      stats.setDataSize(ds);
-
       List<ColStatistics> colStats = Lists.newArrayList();
       if (fetchColStats) {
         colStats = getTableColumnStats(table, schema, neededColumns);
+        long betterDS = getDataSizeFromColumnStats(nr, colStats);
+        ds = betterDS < 1 ? ds : betterDS;
       }
-
+      stats.setDataSize(ds);
       // infer if any column can be primary key based on column statistics
       inferAndSetPrimaryKey(stats.getNumRows(), colStats);
 
@@ -276,11 +285,13 @@ public class StatsUtils {
             LOG.debug("Column stats requested for : " + neededColumns.size() + " columns. Able to" +
                 " retrieve for " + colStats.size() + " columns");
           }
+
           List<ColStatistics> columnStats = convertColStats(colStats, table.getTableName());
 
           addParitionColumnStats(conf, neededColumns, referencedColumns, schema, table, partList,
               columnStats);
-
+          long betterDS = getDataSizeFromColumnStats(nr, columnStats);
+          stats.setDataSize(betterDS < 1 ? ds : betterDS);
           // infer if any column can be primary key based on column statistics
           inferAndSetPrimaryKey(stats.getNumRows(), columnStats);
 
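The behavioural change, distilled: once column statistics are fetched, the data size derived from them replaces the raw/file-based estimate unless the column-stats figure is degenerate (< 1), which is what shifts the Data size numbers in the q.out diffs below. A hedged sketch of that fallback pattern, with hypothetical stand-ins for the StatsUtils helpers:

public class DataSizeEstimate {
  // Hypothetical stand-ins for StatsUtils' internals.
  static long rawOrFileBasedSize() { return 796L; }  // old estimate, cf. the q.out before-lines
  static long sizeFromColumnStats(long numRows) {
    return numRows * 90L;                            // avg row width taken from column stats
  }

  public static void main(String[] args) {
    long numRows = 8;
    long ds = rawOrFileBasedSize();
    long betterDS = sizeFromColumnStats(numRows);
    ds = betterDS < 1 ? ds : betterDS;               // prefer column stats when usable
    System.out.println("Data size: " + ds);          // 720, matching the updated plans
  }
}
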

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
index fc4f294..20ccda5 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
@@ -118,7 +118,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over1k
-            Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2098 Data size: 16736 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53))) (type: boolean)
               Statistics: Num rows: 280 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
index 054b573..f13fdb7 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
@@ -141,7 +141,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (state = 'OH') (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -181,7 +181,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (state <> 'OH') (type: boolean)
               Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
@@ -217,7 +217,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (state <> 'OH') (type: boolean)
               Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
@@ -257,7 +257,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: zip is null (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -295,7 +295,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: zip is null (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -335,7 +335,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: zip is not null (type: boolean)
               Statistics: Num rows: 7 Data size: 702 Basic stats: COMPLETE Column stats: COMPLETE
@@ -373,7 +373,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: zip is not null (type: boolean)
               Statistics: Num rows: 7 Data size: 702 Basic stats: COMPLETE Column stats: COMPLETE
@@ -413,7 +413,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -436,7 +436,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: false (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -476,7 +476,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -499,7 +499,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: 'foo' (type: string)
               Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
@@ -537,7 +537,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -560,7 +560,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: false (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -598,7 +598,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: false (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -636,7 +636,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: false (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -676,7 +676,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((state = 'OH') or (state = 'CA')) (type: boolean)
               Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
@@ -716,7 +716,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((year = 2001) and year is null) (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -754,7 +754,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((year = 2001) and (state = 'OH')) and (state = 'FL')) (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -794,7 +794,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((year = 2001) and year is null) or (state = 'CA')) (type: boolean)
               Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
@@ -834,7 +834,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((year = 2001) or year is null) and (state = 'CA')) (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
@@ -874,7 +874,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid < 30) (type: boolean)
               Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
@@ -910,7 +910,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid > 30) (type: boolean)
               Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
@@ -946,7 +946,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid <= 30) (type: boolean)
               Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
@@ -982,7 +982,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid >= 30) (type: boolean)
               Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
index 1b9ec68..68acacf 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
@@ -154,11 +154,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
               Group By Operator
                 aggregations: count()
                 keys: state (type: string), locid (type: int)
@@ -248,11 +248,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: year (type: int)
               outputColumnNames: year
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: year (type: int)
                 mode: hash
@@ -301,11 +301,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), locid (type: int)
                 mode: hash
@@ -354,11 +354,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
@@ -408,11 +408,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
@@ -462,11 +462,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
@@ -516,11 +516,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
@@ -570,11 +570,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
@@ -624,11 +624,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
@@ -682,30 +682,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: year (type: int)
               outputColumnNames: year
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: year (type: int)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -735,11 +735,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: state, locid
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
@@ -791,11 +791,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: state (type: string), zip (type: bigint)
               outputColumnNames: state, zip
-              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
               Group By Operator
                 keys: state (type: string), zip (type: bigint)
                 mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
index be3fa1d..0d53b70 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
@@ -199,21 +199,21 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: location
-            Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), country (type: string)
               outputColumnNames: state, country
-              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), country (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 10 Data size: 1730 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 10 Data size: 1730 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
@@ -254,21 +254,21 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: location
-            Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 20 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: state (type: string), votes (type: bigint)
               outputColumnNames: state, votes
-              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 20 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
               Group By Operator
                 keys: state (type: string), votes (type: bigint)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 10 Data size: 860 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: bigint)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                  Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 10 Data size: 860 Basic stats: COMPLETE Column stats: PARTIAL
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
@@ -307,21 +307,21 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: location
-            Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), country (type: string)
               outputColumnNames: state, country
-              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), country (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 32 Data size: 8256 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 40 Data size: 10320 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                  Statistics: Num rows: 32 Data size: 8256 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 40 Data size: 10320 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
@@ -361,11 +361,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: location
-            Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), country (type: string)
               outputColumnNames: state, country
-              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), country (type: string)
                 mode: hash
@@ -414,11 +414,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: location
-            Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), country (type: string)
               outputColumnNames: state, country
-              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 keys: state (type: string), country (type: string), '0' (type: string)
                 mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/annotate_stats_join.q.out
index bc44cc3..ee05e6e 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join.q.out
@@ -164,7 +164,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e
-            Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: deptid is not null (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -180,7 +180,7 @@ STAGE PLANS:
                   value expressions: _col0 (type: string), _col2 (type: int)
           TableScan
             alias: d
-            Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: deptid is not null (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -235,7 +235,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: emp
-            Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and lastname is not null) (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -251,7 +251,7 @@ STAGE PLANS:
                   value expressions: _col2 (type: int)
           TableScan
             alias: dept
-            Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and deptname is not null) (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -301,7 +301,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e
-            Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and lastname is not null) (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -317,7 +317,7 @@ STAGE PLANS:
                   value expressions: _col2 (type: int)
           TableScan
             alias: d
-            Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and deptname is not null) (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -371,7 +371,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: emp
-            Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and lastname is not null) (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -387,7 +387,7 @@ STAGE PLANS:
                   value expressions: _col2 (type: int)
           TableScan
             alias: dept
-            Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and deptname is not null) (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -441,7 +441,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e
-            Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: deptid is not null (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -457,7 +457,7 @@ STAGE PLANS:
                   value expressions: _col0 (type: string), _col2 (type: int)
           TableScan
             alias: d
-            Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: deptid is not null (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -473,7 +473,7 @@ STAGE PLANS:
                   value expressions: _col1 (type: string)
           TableScan
             alias: e
-            Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: deptid is not null (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -528,7 +528,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e
-            Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: deptid is not null (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -544,7 +544,7 @@ STAGE PLANS:
                   value expressions: _col0 (type: string), _col2 (type: int)
           TableScan
             alias: d
-            Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: deptid is not null (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -560,7 +560,7 @@ STAGE PLANS:
                   value expressions: _col1 (type: string)
           TableScan
             alias: l
-            Statistics: Num rows: 8 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: locid is not null (type: boolean)
               Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
@@ -617,7 +617,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e
-            Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and lastname is not null) (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -633,7 +633,7 @@ STAGE PLANS:
                   value expressions: _col2 (type: int)
           TableScan
             alias: d
-            Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and deptname is not null) (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -648,7 +648,7 @@ STAGE PLANS:
                   Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: l
-            Statistics: Num rows: 8 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid is not null and state is not null) (type: boolean)
               Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
index c864c04..aa380b2 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
@@ -274,7 +274,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ss_store_sk is not null (type: boolean)
               Statistics: Num rows: 964 Data size: 3716 Basic stats: COMPLETE Column stats: COMPLETE
@@ -289,7 +289,7 @@ STAGE PLANS:
                   Statistics: Num rows: 964 Data size: 3716 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: s_store_sk is not null (type: boolean)
               Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
@@ -343,7 +343,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (ss_store_sk > 0) (type: boolean)
               Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
@@ -358,7 +358,7 @@ STAGE PLANS:
                   Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (s_store_sk > 0) (type: boolean)
               Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
@@ -412,7 +412,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 7668 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((ss_quantity > 10) and ss_store_sk is not null) (type: boolean)
               Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE
@@ -427,7 +427,7 @@ STAGE PLANS:
                   Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: PARTIAL
             Filter Operator
               predicate: ((s_company_id > 0) and s_store_sk is not null) (type: boolean)
               Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
@@ -481,7 +481,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ss_store_sk is not null (type: boolean)
               Statistics: Num rows: 964 Data size: 3716 Basic stats: COMPLETE Column stats: COMPLETE
@@ -496,7 +496,7 @@ STAGE PLANS:
                   Statistics: Num rows: 964 Data size: 3716 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((s_floor_space > 0) and s_store_sk is not null) (type: boolean)
               Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
@@ -550,7 +550,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 7668 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((ss_quantity > 10) and ss_store_sk is not null) (type: boolean)
               Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE
@@ -565,7 +565,7 @@ STAGE PLANS:
                   Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: s_store_sk is not null (type: boolean)
               Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
@@ -619,7 +619,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ss_store_sk is not null (type: boolean)
               Statistics: Num rows: 964 Data size: 3716 Basic stats: COMPLETE Column stats: COMPLETE
@@ -634,7 +634,7 @@ STAGE PLANS:
                   Statistics: Num rows: 964 Data size: 3716 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: s_store_sk is not null (type: boolean)
               Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
@@ -649,7 +649,7 @@ STAGE PLANS:
                   Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: s_store_sk is not null (type: boolean)
               Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
@@ -705,7 +705,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (ss_store_sk > 1000) (type: boolean)
               Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
@@ -720,7 +720,7 @@ STAGE PLANS:
                   Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (s_store_sk > 1000) (type: boolean)
               Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
@@ -735,7 +735,7 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (s_store_sk > 1000) (type: boolean)
               Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
@@ -791,7 +791,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ss_store_sk is not null (type: boolean)
               Statistics: Num rows: 964 Data size: 3716 Basic stats: COMPLETE Column stats: COMPLETE
@@ -806,7 +806,7 @@ STAGE PLANS:
                   Statistics: Num rows: 964 Data size: 3716 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((s_floor_space > 1000) and s_store_sk is not null) (type: boolean)
               Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
@@ -821,7 +821,7 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: s_store_sk is not null (type: boolean)
               Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
@@ -877,7 +877,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 7668 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((ss_quantity > 10) and ss_store_sk is not null) (type: boolean)
               Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE
@@ -892,7 +892,7 @@ STAGE PLANS:
                   Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: s_store_sk is not null (type: boolean)
               Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
@@ -907,7 +907,7 @@ STAGE PLANS:
                   Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: s_store_sk is not null (type: boolean)
               Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
@@ -964,7 +964,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1000 Data size: 130523 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 7656 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (ss_addr_sk is not null and ss_store_sk is not null) (type: boolean)
               Statistics: Num rows: 916 Data size: 7012 Basic stats: COMPLETE Column stats: COMPLETE
@@ -980,7 +980,7 @@ STAGE PLANS:
                   value expressions: _col1 (type: int)
           TableScan
             alias: ca
-            Statistics: Num rows: 20 Data size: 2114 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ca_address_sk is not null (type: boolean)
               Statistics: Num rows: 20 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1020,7 +1020,7 @@ STAGE PLANS:
               Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
-            Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: s_store_sk is not null (type: boolean)
               Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_limit.q.out b/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
index 7300ea0..3c4109b 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
@@ -76,7 +76,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -99,7 +99,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -127,7 +127,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -153,7 +153,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
index cf523cb..186f7af 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
@@ -348,7 +348,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 774 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string)
             outputColumnNames: _col0
@@ -396,7 +396,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 774 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string), locid (type: int)
             outputColumnNames: _col0, _col1
@@ -419,7 +419,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 7 Data size: 630 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int)
             outputColumnNames: _col0, _col1
@@ -465,7 +465,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 774 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 8 Data size: 2192 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -490,7 +490,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (locid > 0) (type: boolean)
             Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -514,7 +514,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (locid > 0) (type: boolean)
             Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -538,7 +538,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (locid > 0) (type: boolean)
             Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
index 877037d..bd645c8 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_select.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
@@ -138,7 +138,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: alltypes_orc
-          Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 2 Data size: 420 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: bo1 (type: boolean), ti1 (type: tinyint), si1 (type: smallint), i1 (type: int), bi1 (type: bigint), f1 (type: float), d1 (type: double), de1 (type: decimal(10,0)), ts1 (type: timestamp), da1 (type: timestamp), s1 (type: string), vc1 (type: varchar(5)), m1 (type: map<string,string>), l1 (type: array<int>), st1 (type: struct<c1:int,c2:string>)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
@@ -161,7 +161,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: alltypes_orc
-          Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: bo1 (type: boolean)
             outputColumnNames: _col0
@@ -186,7 +186,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: alltypes_orc
-          Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: i1 (type: int)
             outputColumnNames: _col0
@@ -209,7 +209,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: alltypes_orc
-          Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: s1 (type: string)
             outputColumnNames: _col0
@@ -257,7 +257,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: alltypes_orc
-          Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 2 Data size: 246 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: bo1 (type: boolean), ti1 (type: tinyint), si1 (type: smallint), i1 (type: int), bi1 (type: bigint), f1 (type: float), d1 (type: double), s1 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
@@ -794,7 +794,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: alltypes_orc
-          Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 2 Data size: 420 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: bo1 (type: boolean), ti1 (type: tinyint), si1 (type: smallint), i1 (type: int), bi1 (type: bigint), f1 (type: float), d1 (type: double), de1 (type: decimal(10,0)), ts1 (type: timestamp), da1 (type: timestamp), s1 (type: string), vc1 (type: varchar(5)), m1 (type: map<string,string>), l1 (type: array<int>), st1 (type: struct<c1:int,c2:string>), 11 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
@@ -821,7 +821,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: alltypes_orc
-          Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: i1 (type: int)
             outputColumnNames: _col0
@@ -849,7 +849,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: alltypes_orc
-          Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: i1 (type: int)
             outputColumnNames: _col0
@@ -877,7 +877,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: alltypes_orc
-            Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: i1 (type: int)
               outputColumnNames: _col0
@@ -961,7 +961,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: alltypes_orc
-            Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: i1 (type: int)
               outputColumnNames: _col0
@@ -1090,7 +1090,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: alltypes_orc
-            Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: bo1 (type: boolean)
               Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1128,7 +1128,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: alltypes_orc
-            Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (not bo1) (type: boolean)
               Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
index ebc6c5b..0f80755 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
@@ -155,7 +155,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 4512 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: lastname (type: string), deptid (type: int)
             outputColumnNames: _col0, _col1
@@ -180,7 +180,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 4512 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: deptid (type: int)
             outputColumnNames: _col0
@@ -213,7 +213,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 4512 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 4560 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: lastname (type: string), deptid (type: int)
             outputColumnNames: _col0, _col1
@@ -236,7 +236,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 4512 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 4368 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: lastname (type: string)
             outputColumnNames: _col0
@@ -259,7 +259,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 4512 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: deptid (type: int)
             outputColumnNames: _col0
@@ -282,7 +282,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 4512 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 4560 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: lastname (type: string), deptid (type: int)
             outputColumnNames: _col0, _col1

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/annotate_stats_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_union.q.out b/ql/src/test/results/clientpositive/annotate_stats_union.q.out
index e09dde3..af7e90a 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_union.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_union.q.out
@@ -76,7 +76,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string)
             outputColumnNames: _col0
@@ -99,7 +99,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string)
               outputColumnNames: _col0
@@ -115,7 +115,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string)
               outputColumnNames: _col0
@@ -152,7 +152,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -175,7 +175,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
               outputColumnNames: _col0, _col1, _col2, _col3
@@ -191,7 +191,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
               outputColumnNames: _col0, _col1, _col2, _col3
@@ -318,7 +318,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string)
               outputColumnNames: _col0
@@ -334,7 +334,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string)
               outputColumnNames: _col0
@@ -371,7 +371,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_staging
-            Statistics: Num rows: 8 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string)
               outputColumnNames: _col0
@@ -387,7 +387,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string)
               outputColumnNames: _col0

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out
index d1bc6d4..7822ad9 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out
@@ -38,7 +38,7 @@ STAGE PLANS:
         a:cbo_t1:cbo_t3 
           TableScan
             alias: cbo_t3
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 3060 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 10) (type: boolean)
               Statistics: Num rows: 6 Data size: 850 Basic stats: COMPLETE Column stats: COMPLETE
@@ -56,7 +56,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: cbo_t3
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 3060 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 10) (type: boolean)
               Statistics: Num rows: 6 Data size: 850 Basic stats: COMPLETE Column stats: COMPLETE
@@ -171,7 +171,7 @@ STAGE PLANS:
         a:cbo_t1:cbo_t3 
           TableScan
             alias: cbo_t3
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 3060 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 10) (type: boolean)
               Statistics: Num rows: 6 Data size: 850 Basic stats: COMPLETE Column stats: COMPLETE
@@ -189,7 +189,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: cbo_t3
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 3060 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 10) (type: boolean)
               Statistics: Num rows: 6 Data size: 850 Basic stats: COMPLETE Column stats: COMPLETE
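
The golden-file updates above all move in the same direction: wherever column statistics are available, the TableScan's Data size is now derived from the statistics of the columns actually referenced rather than from a single per-table figure (note how the same 8-row loc_orc scan reports 688, 720, or 804 bytes depending on which columns are projected). A minimal sketch to reproduce such a plan, assuming a table like loc_orc(state string, locid int, zip bigint, year int) as in these tests, with hive.stats.fetch.column.stats enabled as the tests do:

    -- gather column statistics so the optimizer can size scans per column
    analyze table loc_orc compute statistics for columns;
    set hive.stats.fetch.column.stats=true;
    -- the TableScan in the plan now reports the column-stats-derived Data size
    explain select state, locid from loc_orc;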


[22/55] [abbrv] hive git commit: HIVE-12320 : hive.metastore.disallow.incompatible.col.type.changes should be true by default (Ashutosh Chauhan via Jason Dere)

Posted by xu...@apache.org.
HIVE-12320 : hive.metastore.disallow.incompatible.col.type.changes should be true by default (Ashutosh Chauhan via Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0add6378
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0add6378
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0add6378

Branch: refs/heads/spark
Commit: 0add63786d293e7323ef147a85b0c61523c1973a
Parents: a8eb4ae
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Nov 5 15:55:39 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Nov 5 15:55:39 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +-
 .../hive/hcatalog/cli/TestSemanticAnalysis.java |   1 +
 .../hive/hcatalog/api/TestHCatClient.java       |   2 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |  13 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   | 118 ++-----------------
 .../hive/ql/parse/TypeCheckProcFactory.java     |   5 +-
 .../hive/ql/exec/TestFunctionRegistry.java      |   2 +-
 .../disallow_incompatible_type_change_on1.q     |   6 +-
 ql/src/test/queries/clientpositive/alter1.q     |   6 +-
 .../queries/clientpositive/avro_partitioned.q   |   3 +-
 .../columnarserde_create_shortcut.q             |   2 +
 ql/src/test/queries/clientpositive/input3.q     |  10 +-
 ql/src/test/queries/clientpositive/lineage3.q   |   3 +-
 .../clientpositive/orc_int_type_promotion.q     |   2 +
 .../clientpositive/parquet_schema_evolution.q   |   6 +-
 .../partition_wise_fileformat11.q               |   4 +-
 .../partition_wise_fileformat12.q               |   4 +-
 .../partition_wise_fileformat13.q               |   5 +-
 .../partition_wise_fileformat15.q               |   4 +-
 .../partition_wise_fileformat16.q               |   4 +-
 .../test/queries/clientpositive/rename_column.q |   4 +-
 .../disallow_incompatible_type_change_on1.q.out |   3 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |  95 ++++++++++++++-
 23 files changed, 154 insertions(+), 150 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 3ab73ad..98f9206 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -637,7 +637,7 @@ public class HiveConf extends Configuration {
         "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
         "pruning is the correct behaviour"),
     METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
-        "hive.metastore.disallow.incompatible.col.type.changes", false,
+        "hive.metastore.disallow.incompatible.col.type.changes", true,
         "If true (default is false), ALTER TABLE operations which change the type of a\n" +
         "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
         "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
index 606cb3a..cf15ff2 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
@@ -68,6 +68,7 @@ public class TestSemanticAnalysis extends HCatBaseTest {
           "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe");
       hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
           HCatSemanticAnalyzer.class.getName());
+      hcatConf.setBoolVar(HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, false);
       hcatDriver = new Driver(hcatConf);
       SessionState.start(new CliSessionState(hcatConf));
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
index 891322a..aa9c7d3 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
@@ -565,7 +565,7 @@ public class TestHCatClient {
       client.createTable(HCatCreateTableDesc.create(dbName, tableName, oldSchema).build());
 
       List<HCatFieldSchema> newSchema = Arrays.asList(new HCatFieldSchema("completely", Type.DOUBLE, ""),
-          new HCatFieldSchema("new", Type.FLOAT, ""),
+          new HCatFieldSchema("new", Type.STRING, ""),
           new HCatFieldSchema("fields", Type.STRING, ""));
 
       client.updateTableSchema(dbName, tableName, newSchema);

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index bbaa1ce..02cbd76 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -51,11 +51,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -79,6 +77,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
 import org.apache.hive.common.util.ReflectionUtil;
@@ -632,9 +631,6 @@ public class MetaStoreUtils {
    * Two types are compatible if we have internal functions to cast one to another.
    */
   static private boolean areColTypesCompatible(String oldType, String newType) {
-    if (oldType.equals(newType)) {
-      return true;
-    }
 
     /*
      * RCFile default serde (ColumnarSerde) serializes the values in such a way that the
@@ -645,12 +641,9 @@ public class MetaStoreUtils {
      * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
      * not blocked.
      */
-    if(serdeConstants.PrimitiveTypes.contains(oldType.toLowerCase()) &&
-        serdeConstants.PrimitiveTypes.contains(newType.toLowerCase())) {
-      return true;
-    }
 
-    return false;
+    return TypeInfoUtils.implicitConvertible(TypeInfoUtils.getTypeInfoFromTypeString(oldType),
+      TypeInfoUtils.getTypeInfoFromTypeString(newType));
   }
 
   public static final int MAX_MS_TYPENAME_LENGTH = 2000; // 4000/2, for an unlikely unicode case
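
With the equality and PrimitiveTypes shortcuts removed, compatibility is
decided entirely by the implicit-conversion rules: widening changes pass and
lossy ones fail. A rough sketch of the resulting behaviour (the type strings
are illustrative; the rules are the ones moved into TypeInfoUtils later in
this patch):

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    // Widening a numeric column, or numeric to string, is implicit.
    TypeInfoUtils.implicitConvertible(
        TypeInfoUtils.getTypeInfoFromTypeString("int"),
        TypeInfoUtils.getTypeInfoFromTypeString("bigint"));   // true
    // Narrowing (string -> int) and complex-type changes are rejected.
    TypeInfoUtils.implicitConvertible(
        TypeInfoUtils.getTypeInfoFromTypeString("string"),
        TypeInfoUtils.getTypeInfoFromTypeString("int"));      // false
    TypeInfoUtils.implicitConvertible(
        TypeInfoUtils.getTypeInfoFromTypeString("map<string,string>"),
        TypeInfoUtils.getTypeInfoFromTypeString("string"));   // false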

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 2196ca9..5353062 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -22,7 +22,6 @@ import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.EnumMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
@@ -558,30 +557,6 @@ public final class FunctionRegistry {
     return synonyms;
   }
 
-  // The ordering of types here is used to determine which numeric types
-  // are common/convertible to one another. Probably better to rely on the
-  // ordering explicitly defined here than to assume that the enum values
-  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
-  static EnumMap<PrimitiveCategory, Integer> numericTypes =
-      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
-  static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
-
-  static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
-    numericTypeList.add(primitiveCategory);
-    numericTypes.put(primitiveCategory, level);
-  }
-
-  static {
-    registerNumericType(PrimitiveCategory.BYTE, 1);
-    registerNumericType(PrimitiveCategory.SHORT, 2);
-    registerNumericType(PrimitiveCategory.INT, 3);
-    registerNumericType(PrimitiveCategory.LONG, 4);
-    registerNumericType(PrimitiveCategory.FLOAT, 5);
-    registerNumericType(PrimitiveCategory.DOUBLE, 6);
-    registerNumericType(PrimitiveCategory.DECIMAL, 7);
-    registerNumericType(PrimitiveCategory.STRING, 8);
-  }
-
   /**
    * Check if the given type is numeric. String is considered numeric when used in
    * numeric operators.
@@ -702,15 +677,15 @@ public final class FunctionRegistry {
           (PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b,PrimitiveCategory.STRING);
     }
 
-    if (FunctionRegistry.implicitConvertible(a, b)) {
+    if (TypeInfoUtils.implicitConvertible(a, b)) {
       return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, pcB);
     }
-    if (FunctionRegistry.implicitConvertible(b, a)) {
+    if (TypeInfoUtils.implicitConvertible(b, a)) {
       return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, pcA);
     }
-    for (PrimitiveCategory t : numericTypeList) {
-      if (FunctionRegistry.implicitConvertible(pcA, t)
-          && FunctionRegistry.implicitConvertible(pcB, t)) {
+    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
+      if (TypeInfoUtils.implicitConvertible(pcA, t)
+          && TypeInfoUtils.implicitConvertible(pcB, t)) {
         return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, t);
       }
     }
@@ -759,9 +734,9 @@ public final class FunctionRegistry {
       return TypeInfoFactory.doubleTypeInfo;
     }
 
-    for (PrimitiveCategory t : numericTypeList) {
-      if (FunctionRegistry.implicitConvertible(pcA, t)
-          && FunctionRegistry.implicitConvertible(pcB, t)) {
+    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
+      if (TypeInfoUtils.implicitConvertible(pcA, t)
+          && TypeInfoUtils.implicitConvertible(pcB, t)) {
         return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, t);
       }
     }
@@ -790,8 +765,8 @@ public final class FunctionRegistry {
     if (pgB == PrimitiveGrouping.DATE_GROUP && pgA == PrimitiveGrouping.STRING_GROUP) {
       return PrimitiveCategory.STRING;
     }
-    Integer ai = numericTypes.get(pcA);
-    Integer bi = numericTypes.get(pcB);
+    Integer ai = TypeInfoUtils.numericTypes.get(pcA);
+    Integer bi = TypeInfoUtils.numericTypes.get(pcB);
     if (ai == null || bi == null) {
       // If either is not a numeric type, return null.
       return null;
@@ -870,73 +845,6 @@ public final class FunctionRegistry {
     return TypeInfoFactory.getStructTypeInfo(names, typeInfos);
   }
 
-  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
-    if (from == to) {
-      return true;
-    }
-
-    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
-    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
-
-    // Allow implicit String to Double conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
-      return true;
-    }
-    // Allow implicit String to Decimal conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
-      return true;
-    }
-    // Void can be converted to any type
-    if (from == PrimitiveCategory.VOID) {
-      return true;
-    }
-
-    // Allow implicit String to Date conversion
-    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-    // Allow implicit Numeric to String conversion
-    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-    // Allow implicit String to varchar conversion, and vice versa
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-
-    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
-    // Decimal -> String
-    Integer f = numericTypes.get(from);
-    Integer t = numericTypes.get(to);
-    if (f == null || t == null) {
-      return false;
-    }
-    if (f.intValue() > t.intValue()) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Returns whether it is possible to implicitly convert an object of Class
-   * from to Class to.
-   */
-  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
-    if (from.equals(to)) {
-      return true;
-    }
-
-    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
-    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
-    // seen as equivalent.
-    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
-      return implicitConvertible(
-          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
-          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
-    }
-    return false;
-  }
-
   /**
    * Get the GenericUDAF evaluator for the name and argumentClasses.
    *
@@ -1105,7 +1013,7 @@ public final class FunctionRegistry {
       // but there is a conversion cost.
       return 1;
     }
-    if (!exact && implicitConvertible(argumentPassed, argumentAccepted)) {
+    if (!exact && TypeInfoUtils.implicitConvertible(argumentPassed, argumentAccepted)) {
       return 1;
     }
 
@@ -1273,9 +1181,9 @@ public final class FunctionRegistry {
             acceptedIsPrimitive = true;
             acceptedPrimCat = ((PrimitiveTypeInfo) accepted).getPrimitiveCategory();
           }
-          if (acceptedIsPrimitive && numericTypes.containsKey(acceptedPrimCat)) {
+          if (acceptedIsPrimitive && TypeInfoUtils.numericTypes.containsKey(acceptedPrimCat)) {
             // We're looking for the udf with the smallest maximum numeric type.
-            int typeValue = numericTypes.get(acceptedPrimCat);
+            int typeValue = TypeInfoUtils.numericTypes.get(acceptedPrimCat);
             maxNumericType = typeValue > maxNumericType ? typeValue : maxNumericType;
           } else if (!accepted.equals(reference)) {
             // There are non-numeric arguments that don't match from one UDF to
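
The common-type search itself is unchanged by the move: it still walks the
numeric ordering from narrowest to widest and returns the first category both
operands convert to, only now the table lives in TypeInfoUtils. Sketched with
the relocated table (variable names are illustrative):

    import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    // INT and FLOAT first meet at FLOAT (level 5), so INT + FLOAT -> FLOAT.
    PrimitiveCategory common = null;
    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
      if (TypeInfoUtils.implicitConvertible(PrimitiveCategory.INT, t)
          && TypeInfoUtils.implicitConvertible(PrimitiveCategory.FLOAT, t)) {
        common = t;  // FLOAT on the first match
        break;
      }
    }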

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 3a6535b..7f5d72a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hive.common.util.DateUtils;
@@ -903,7 +904,7 @@ public class TypeCheckProcFactory {
 
         if (myt.getCategory() == Category.LIST) {
           // Only allow integer index for now
-          if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(),
+          if (!TypeInfoUtils.implicitConvertible(children.get(1).getTypeInfo(),
               TypeInfoFactory.intTypeInfo)) {
             throw new SemanticException(SemanticAnalyzer.generateErrorMessage(
                   expr, ErrorMsg.INVALID_ARRAYINDEX_TYPE.getMsg()));
@@ -913,7 +914,7 @@ public class TypeCheckProcFactory {
           TypeInfo t = ((ListTypeInfo) myt).getListElementTypeInfo();
           desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry.getGenericUDFForIndex(), children);
         } else if (myt.getCategory() == Category.MAP) {
-          if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(),
+          if (!TypeInfoUtils.implicitConvertible(children.get(1).getTypeInfo(),
               ((MapTypeInfo) myt).getMapKeyTypeInfo())) {
             throw new SemanticException(ErrorMsg.INVALID_MAPINDEX_TYPE
                 .getMsg(expr));
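
The same test now gates index expressions: an array subscript must convert
implicitly to int, and a map subscript to the map's key type, so a smallint
index is accepted while a string index raises INVALID_ARRAYINDEX_TYPE. A
short sketch using the singletons in TypeInfoFactory:

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    // smallint widens to int, so arr[s] type-checks ...
    TypeInfoUtils.implicitConvertible(
        TypeInfoFactory.shortTypeInfo, TypeInfoFactory.intTypeInfo);   // true
    // ... but string -> int would be narrowing, so arr['k'] is rejected.
    TypeInfoUtils.implicitConvertible(
        TypeInfoFactory.stringTypeInfo, TypeInfoFactory.intTypeInfo);  // false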

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
index 068bdee..6a83c32 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
@@ -80,7 +80,7 @@ public class TestFunctionRegistry extends TestCase {
   }
 
   private void implicit(TypeInfo a, TypeInfo b, boolean convertible) {
-    assertEquals(convertible, FunctionRegistry.implicitConvertible(a, b));
+    assertEquals(convertible, TypeInfoUtils.implicitConvertible(a, b));
   }
 
   public void testImplicitConversion() {

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q b/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
index d0d748c..cec9a0d 100644
--- a/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
+++ b/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
@@ -1,4 +1,4 @@
-SET hive.metastore.disallow.incompatible.col.type.changes=true;
+SET hive.metastore.disallow.incompatible.col.type.changes=false;
 SELECT * FROM src LIMIT 1;
 CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
 INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1;
@@ -11,7 +11,11 @@ ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 REPLACE COLUMNS (a BOOLEAN, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 CHANGE COLUMN a a_new BOOLEAN;
+
+SET hive.metastore.disallow.incompatible.col.type.changes=true;
 -- All the above ALTERs will succeed since they are between compatible types.
 -- The following ALTER will fail as MAP<STRING, STRING> and STRING are not
 -- compatible.
+
 ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING);
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/alter1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter1.q b/ql/src/test/queries/clientpositive/alter1.q
index 2fac195..767ab5c 100644
--- a/ql/src/test/queries/clientpositive/alter1.q
+++ b/ql/src/test/queries/clientpositive/alter1.q
@@ -21,8 +21,9 @@ describe extended alter1;
 
 alter table alter1 set serde 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe';
 describe extended alter1;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table alter1 replace columns (a int, b int, c string);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 describe alter1;
 
 -- Cleanup
@@ -61,8 +62,9 @@ DESCRIBE EXTENDED alter1_db.alter1;
 
 ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe';
 DESCRIBE EXTENDED alter1_db.alter1;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE alter1_db.alter1 REPLACE COLUMNS (a int, b int, c string);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE alter1_db.alter1;
 
 DROP TABLE alter1_db.alter1;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/avro_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/avro_partitioned.q b/ql/src/test/queries/clientpositive/avro_partitioned.q
index a06e7c4..9e6c79a 100644
--- a/ql/src/test/queries/clientpositive/avro_partitioned.q
+++ b/ql/src/test/queries/clientpositive/avro_partitioned.q
@@ -112,7 +112,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat';
 
 -- Insert data into a partition
 INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 -- Evolve the table schema by adding new array field "cast_and_crew"
 ALTER TABLE episodes_partitioned_serdeproperties
 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
@@ -144,5 +144,6 @@ WITH SERDEPROPERTIES ('avro.schema.literal'='{
   ]
 }');
 
+reset hive.metastore.disallow.incompatible.col.type.changes;
 -- Try selecting from the evolved table
 SELECT * FROM episodes_partitioned_serdeproperties;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q b/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
index 8d8cb6b..851a821 100644
--- a/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
+++ b/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
@@ -22,5 +22,7 @@ SELECT * FROM columnShortcutTable;
 
 ALTER TABLE columnShortcutTable ADD COLUMNS (c string);
 SELECT * FROM columnShortcutTable;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE columnShortcutTable REPLACE COLUMNS (key int);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 SELECT * FROM columnShortcutTable;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/input3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input3.q b/ql/src/test/queries/clientpositive/input3.q
index 2efa7a4..1925fff 100644
--- a/ql/src/test/queries/clientpositive/input3.q
+++ b/ql/src/test/queries/clientpositive/input3.q
@@ -1,7 +1,3 @@
-
-
-
-
 CREATE TABLE TEST3a(A INT, B DOUBLE) STORED AS TEXTFILE; 
 DESCRIBE TEST3a; 
 CREATE TABLE TEST3b(A ARRAY<INT>, B DOUBLE, C MAP<DOUBLE, INT>) STORED AS TEXTFILE; 
@@ -16,11 +12,9 @@ ALTER TABLE TEST3b RENAME TO TEST3c;
 ALTER TABLE TEST3b RENAME TO TEST3c;
 DESCRIBE TEST3c; 
 SHOW TABLES;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 EXPLAIN
 ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
 ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE EXTENDED TEST3c;
-
-
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/lineage3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lineage3.q b/ql/src/test/queries/clientpositive/lineage3.q
index 70d4e57..d1fb454 100644
--- a/ql/src/test/queries/clientpositive/lineage3.q
+++ b/ql/src/test/queries/clientpositive/lineage3.q
@@ -1,5 +1,5 @@
 set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 drop table if exists d1;
 create table d1(a int);
 
@@ -202,3 +202,4 @@ insert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d wh
 insert into dest_dp2 partition (y=1, m) select f, w, m
 insert into dest_dp1 partition (year=0) select f, w;
 
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
index 4a805a0..c3e2cf9 100644
--- a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
+++ b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
@@ -1,3 +1,4 @@
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 create table if not exists alltypes (
  bo boolean,
  ti tinyint,
@@ -77,3 +78,4 @@ select * from src_part_orc limit 10;
 
 alter table src_part_orc change key key bigint;
 select * from src_part_orc limit 10;
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_schema_evolution.q b/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
index af0cf99..d2f2996 100644
--- a/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
+++ b/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
@@ -11,10 +11,10 @@ INSERT OVERWRITE TABLE NewStructField SELECT named_struct('a1', map('k1','v1'),
 
 DESCRIBE NewStructField;
 SELECT * FROM NewStructField;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 -- Adds new fields to the struct types
 ALTER TABLE NewStructField REPLACE COLUMNS (a struct<a1:map<string,string>, a2:struct<e1:int,e2:string>, a3:int>, b int);
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE NewStructField;
 SELECT * FROM NewStructField;
 
@@ -24,4 +24,4 @@ DESCRIBE NewStructFieldTable;
 SELECT * FROM NewStructFieldTable;
 
 DROP TABLE NewStructField;
-DROP TABLE NewStructFieldTable;
\ No newline at end of file
+DROP TABLE NewStructFieldTable;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
index 1a4291f..b2db2f1 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
@@ -7,9 +7,9 @@ insert overwrite table partition_test_partitioned partition(dt='1') select * fro
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
index bc51cb5..632d022 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
@@ -7,9 +7,9 @@ insert overwrite table partition_test_partitioned partition(dt='1') select * fro
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
index 2e4ae69..f124ec3 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
@@ -4,8 +4,9 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 create table T1(key string, value string) partitioned by (dt string) stored as rcfile;
 alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
 insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table T1 change key key int;
+reset hive.metastore.disallow.incompatible.col.type.changes;
 insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97;
 
 alter table T1 change key key string;
@@ -14,4 +15,4 @@ create table T2(key string, value string) partitioned by (dt string) stored as r
 insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97;
 
 select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
-select count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
\ No newline at end of file
+select count(*) FROM T1 a JOIN T2 b ON a.key = b.key;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
index 6fce1e0..70a454f 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
@@ -8,9 +8,9 @@ select * from src where key = 238;
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
index 37bb1a7..92757f6 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
@@ -8,9 +8,9 @@ select * from src where key = 238;
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/queries/clientpositive/rename_column.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/rename_column.q b/ql/src/test/queries/clientpositive/rename_column.q
index a3f3f30..a211cfa 100644
--- a/ql/src/test/queries/clientpositive/rename_column.q
+++ b/ql/src/test/queries/clientpositive/rename_column.q
@@ -3,7 +3,7 @@ DESCRIBE kv_rename_test;
 
 ALTER TABLE kv_rename_test CHANGE a a STRING;
 DESCRIBE kv_rename_test;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE kv_rename_test CHANGE a a1 INT;
 DESCRIBE kv_rename_test;
 
@@ -52,6 +52,6 @@ DESCRIBE kv_rename_test;
 
 ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b;
 DESCRIBE kv_rename_test;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DROP TABLE kv_rename_test;
 SHOW TABLES;

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
index 96600eb..69b2b41 100644
--- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
+++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
@@ -103,9 +103,10 @@ POSTHOOK: Output: default@test_table123
 PREHOOK: query: -- All the above ALTERs will succeed since they are between compatible types.
 -- The following ALTER will fail as MAP<STRING, STRING> and STRING are not
 -- compatible.
+
 ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@test_table123
 PREHOOK: Output: default@test_table123
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
-b
+a,b

http://git-wip-us.apache.org/repos/asf/hive/blob/0add6378/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index 24361c7..1d79880 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -23,6 +23,7 @@ import java.lang.reflect.Method;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
 import java.util.ArrayList;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveTypeEntry;
 
 /**
@@ -53,6 +55,25 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
  */
 public final class TypeInfoUtils {
 
+  public static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
+  // The ordering of types here is used to determine which numeric types
+  // are common/convertible to one another. Probably better to rely on the
+  // ordering explicitly defined here than to assume that the enum values
+  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
+  public static EnumMap<PrimitiveCategory, Integer> numericTypes =
+      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
+
+  static {
+    registerNumericType(PrimitiveCategory.BYTE, 1);
+    registerNumericType(PrimitiveCategory.SHORT, 2);
+    registerNumericType(PrimitiveCategory.INT, 3);
+    registerNumericType(PrimitiveCategory.LONG, 4);
+    registerNumericType(PrimitiveCategory.FLOAT, 5);
+    registerNumericType(PrimitiveCategory.DOUBLE, 6);
+    registerNumericType(PrimitiveCategory.DECIMAL, 7);
+    registerNumericType(PrimitiveCategory.STRING, 8);
+  }
+
   private TypeInfoUtils() {
     // prevent instantiation
   }
@@ -266,7 +287,7 @@ public final class TypeInfoUtils {
      *
      * tokenize("map<int,string>") should return
      * ["map","<","int",",","string",">"]
-     * 
+     *
      * Note that we add '$' in new Calcite return path. As '$' will not appear
      * in any type in Hive, it is safe to do so.
      */
@@ -810,4 +831,76 @@ public final class TypeInfoUtils {
         return 0;
     }
   }
+
+  public static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
+    numericTypeList.add(primitiveCategory);
+    numericTypes.put(primitiveCategory, level);
+  }
+
+  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
+    if (from == to) {
+      return true;
+    }
+
+    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
+    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
+
+    // Allow implicit String to Double conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
+      return true;
+    }
+    // Allow implicit String to Decimal conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
+      return true;
+    }
+    // Void can be converted to any type
+    if (from == PrimitiveCategory.VOID) {
+      return true;
+    }
+
+    // Allow implicit String to Date conversion
+    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit Numeric to String conversion
+    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit String to varchar conversion, and vice versa
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+
+    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
+    // Decimal -> String
+    Integer f = numericTypes.get(from);
+    Integer t = numericTypes.get(to);
+    if (f == null || t == null) {
+      return false;
+    }
+    if (f.intValue() > t.intValue()) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Returns whether it is possible to implicitly convert an object of Class
+   * from to Class to.
+   */
+  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
+    if (from.equals(to)) {
+      return true;
+    }
+
+    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
+    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
+    // seen as equivalent.
+    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
+      return implicitConvertible(
+          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
+          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
+    }
+    return false;
+  }
 }


[23/55] [abbrv] hive git commit: HIVE-12315: Fix Vectorized double divide by zero (Gopal V, reviewed by Matt McCline)

Posted by xu...@apache.org.
HIVE-12315: Fix Vectorized double divide by zero (Gopal V, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b5654cc6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b5654cc6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b5654cc6

Branch: refs/heads/spark
Commit: b5654cc6ffbe99a13c2a41b8cc0ffdfadcf9274f
Parents: 0add637
Author: Gopal V <go...@apache.org>
Authored: Thu Nov 5 17:16:46 2015 -0800
Committer: Gopal V <go...@apache.org>
Committed: Thu Nov 5 17:16:46 2015 -0800

----------------------------------------------------------------------
 .../ql/exec/vector/expressions/NullUtil.java    | 21 ++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b5654cc6/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java
index e4a9824..2eb48fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java
@@ -125,20 +125,21 @@ public class NullUtil {
   public static void setNullAndDivBy0DataEntriesDouble(
       DoubleColumnVector v, boolean selectedInUse, int[] sel, int n, LongColumnVector denoms) {
     assert v.isRepeating || !denoms.isRepeating;
+    final boolean realNulls = !v.noNulls;
     v.noNulls = false;
     long[] vector = denoms.vector;
-    if (v.isRepeating && (v.isNull[0] = (v.isNull[0] || vector[0] == 0))) {
+    if (v.isRepeating && (v.isNull[0] = ((realNulls && v.isNull[0]) || vector[0] == 0))) {
       v.vector[0] = DoubleColumnVector.NULL_VALUE;
     } else if (selectedInUse) {
       for (int j = 0; j != n; j++) {
         int i = sel[j];
-        if (v.isNull[i] = (v.isNull[i] || vector[i] == 0)) {
+        if (v.isNull[i] = ((realNulls && v.isNull[i]) || vector[i] == 0)) {
           v.vector[i] = DoubleColumnVector.NULL_VALUE;
         }
       }
     } else {
       for (int i = 0; i != n; i++) {
-        if (v.isNull[i] = (v.isNull[i] || vector[i] == 0)) {
+        if (v.isNull[i] = ((realNulls && v.isNull[i]) || vector[i] == 0)) {
           v.vector[i] = DoubleColumnVector.NULL_VALUE;
         }
       }
@@ -152,20 +153,21 @@ public class NullUtil {
   public static void setNullAndDivBy0DataEntriesDouble(
       DoubleColumnVector v, boolean selectedInUse, int[] sel, int n, DoubleColumnVector denoms) {
     assert v.isRepeating || !denoms.isRepeating;
+    final boolean realNulls = !v.noNulls;
     v.noNulls = false;
     double[] vector = denoms.vector;
-    if (v.isRepeating && (v.isNull[0] = (v.isNull[0] || vector[0] == 0))) {
+    if (v.isRepeating && (v.isNull[0] = ((realNulls && v.isNull[0]) || vector[0] == 0))) {
       v.vector[0] = DoubleColumnVector.NULL_VALUE;
     } else if (selectedInUse) {
       for (int j = 0; j != n; j++) {
         int i = sel[j];
-        if (v.isNull[i] = (v.isNull[i] || vector[i] == 0)) {
+        if (v.isNull[i] = ((realNulls && v.isNull[i]) || vector[i] == 0)) {
           v.vector[i] = DoubleColumnVector.NULL_VALUE;
         }
       }
     } else {
       for (int i = 0; i != n; i++) {
-        if (v.isNull[i] = (v.isNull[i] || vector[i] == 0)) {
+        if (v.isNull[i] = ((realNulls && v.isNull[i]) || vector[i] == 0)) {
           v.vector[i] = DoubleColumnVector.NULL_VALUE;
         }
       }
@@ -235,6 +237,13 @@ public class NullUtil {
 
     outputColVector.noNulls = inputColVector1.noNulls && inputColVector2.noNulls;
 
+    if (outputColVector.noNulls) {
+      // the inputs might not always have isNull initialized for
+      // inputColVector1.isNull[i] || inputColVector2.isNull[i] to be valid
+      Arrays.fill(outputColVector.isNull, false);
+      return;
+    }
+
     if (inputColVector1.noNulls && !inputColVector2.noNulls) {
       if (inputColVector2.isRepeating) {
         outputColVector.isNull[0] = inputColVector2.isNull[0];
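
The guard matters because a vector with noNulls == true makes no promises
about the contents of its isNull array; the old code OR-ed those stale
entries into the result and could resurrect phantom nulls. The fix consults
isNull only when realNulls holds, and the propagate path scrubs the output's
flags on the all-not-null fast path. A self-contained sketch of the guarded
loop (primitive arrays stand in for the ColumnVector fields, and Double.NaN
for DoubleColumnVector.NULL_VALUE):

    // Mark entries null on divide-by-zero, trusting isNull[] only when the
    // vector actually advertises nulls (noNulls == false).
    static void markDivByZeroNulls(double[] vector, boolean[] isNull,
        boolean noNulls, double[] denoms, int n) {
      final boolean realNulls = !noNulls;
      for (int i = 0; i != n; i++) {
        if (isNull[i] = ((realNulls && isNull[i]) || denoms[i] == 0)) {
          vector[i] = Double.NaN;  // null sentinel
        }
      }
    }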


[05/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_udf.q.out b/ql/src/test/results/clientpositive/decimal_udf.q.out
index ce1fe3f..abbfc50 100644
--- a/ql/src/test/results/clientpositive/decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/decimal_udf.q.out
@@ -55,44 +55,44 @@ POSTHOOK: query: SELECT key + key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-200
-20
-2
-0.2
-0.02
-400
-40
-4
-0
-0.4
-0.04
-0.6
-0.66
-0.666
--0.6
--0.66
--0.666
-2
-4
-6.28
--2.24
--2.24
--2.244
-2.24
-2.244
-248
-250.4
--2510.98
-6.28
-6.28
-6.28
-2
--2469135780.246913578
-2469135780.24691356
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.2000000000
+0.0200000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.4000000000
+0.0400000000
+0.6000000000
+0.6600000000
+0.6660000000
+-0.6000000000
+-0.6600000000
+-0.6660000000
+2.0000000000
+4.0000000000
+6.2800000000
+-2.2400000000
+-2.2400000000
+-2.2440000000
+2.2400000000
+2.2440000000
+248.0000000000
+250.4000000000
+-2510.9800000000
+6.2800000000
+6.2800000000
+6.2800000000
+2.0000000000
+-2469135780.2469135780
+2469135780.2469135600
 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
@@ -122,44 +122,44 @@ POSTHOOK: query: SELECT key + value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-200
-20
-2
-0.1
-0.01
-400
-40
-4
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-2
-4
-6.14
--2.12
--2.12
--12.122
-2.12
-2.122
-248
-250.2
--2510.49
-6.14
-6.14
-7.14
-2
--2469135780.123456789
-2469135780.12345678
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.1000000000
+0.0100000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+2.0000000000
+4.0000000000
+6.1400000000
+-2.1200000000
+-2.1200000000
+-12.1220000000
+2.1200000000
+2.1220000000
+248.0000000000
+250.2000000000
+-2510.4900000000
+6.1400000000
+6.1400000000
+7.1400000000
+2.0000000000
+-2469135780.1234567890
+2469135780.1234567800
 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
@@ -325,44 +325,44 @@ POSTHOOK: query: SELECT key - key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
@@ -392,44 +392,44 @@ POSTHOOK: query: SELECT key - value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-0
-0
-0
-0.1
-0.01
-0
-0
-0
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-0
-0
-0.14
--0.12
--0.12
-9.878
-0.12
-0.122
-0
-0.2
--0.49
-0.14
-0.14
--0.86
-0
--0.123456789
-0.12345678
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1000000000
+0.0100000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.0000000000
+0.0000000000
+0.1400000000
+-0.1200000000
+-0.1200000000
+9.8780000000
+0.1200000000
+0.1220000000
+0.0000000000
+0.2000000000
+-0.4900000000
+0.1400000000
+0.1400000000
+-0.8600000000
+0.0000000000
+-0.1234567890
+0.1234567800
 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
@@ -595,42 +595,42 @@ POSTHOOK: query: SELECT key * key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-19360000
+19360000.00000000000000000000
 NULL
-0
-0
-10000
-100
-1
-0.01
-0.0001
-40000
-400
-4
-0
-0.04
-0.0004
-0.09
-0.1089
-0.110889
-0.09
-0.1089
-0.110889
-1
-4
-9.8596
-1.2544
-1.2544
-1.258884
-1.2544
-1.258884
-15376
-15675.04
-1576255.1401
-9.8596
-9.8596
-9.8596
-1
+0.00000000000000000000
+0.00000000000000000000
+10000.00000000000000000000
+100.00000000000000000000
+1.00000000000000000000
+0.01000000000000000000
+0.00010000000000000000
+40000.00000000000000000000
+400.00000000000000000000
+4.00000000000000000000
+0.00000000000000000000
+0.04000000000000000000
+0.00040000000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+1.00000000000000000000
+4.00000000000000000000
+9.85960000000000000000
+1.25440000000000000000
+1.25440000000000000000
+1.25888400000000000000
+1.25440000000000000000
+1.25888400000000000000
+15376.00000000000000000000
+15675.04000000000000000000
+1576255.14010000000000000000
+9.85960000000000000000
+9.85960000000000000000
+9.85960000000000000000
+1.00000000000000000000
 NULL
 NULL
 PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0
@@ -665,29 +665,29 @@ POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-100	100
-10	10
-1	1
-200	200
-20	20
-2	2
-1	1
-2	2
-3.14	3
--1.12	-1
--1.12	-1
--1.122	-11
-1.12	1
-1.122	1
-124	124
-125.2	125
--1255.49	-1255
-3.14	3
-3.14	3
-3.14	4
-1	1
--1234567890.123456789	-1234567890
-1234567890.12345678	1234567890
+100.0000000000	100
+10.0000000000	10
+1.0000000000	1
+200.0000000000	200
+20.0000000000	20
+2.0000000000	2
+1.0000000000	1
+2.0000000000	2
+3.1400000000	3
+-1.1200000000	-1
+-1.1200000000	-1
+-1.1220000000	-11
+1.1200000000	1
+1.1220000000	1
+124.0000000000	124
+125.2000000000	125
+-1255.4900000000	-1255
+3.1400000000	3
+3.1400000000	3
+3.1400000000	4
+1.0000000000	1
+-1234567890.1234567890	-1234567890
+1234567890.1234567800	1234567890
 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
@@ -717,44 +717,44 @@ POSTHOOK: query: SELECT key * value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--19360000
+-19360000.0000000000
 NULL
-0
-0
-10000
-100
-1
-0
-0
-40000
-400
-4
-0
-0
-0
-0
-0
-0
-0
-0
-0
-1
-4
-9.42
-1.12
-1.12
-12.342
-1.12
-1.122
-15376
-15650
-1575639.95
-9.42
-9.42
-12.56
-1
-1524157875171467887.50190521
-1524157875171467876.3907942
+0.0000000000
+0.0000000000
+10000.0000000000
+100.0000000000
+1.0000000000
+0.0000000000
+0.0000000000
+40000.0000000000
+400.0000000000
+4.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+1.0000000000
+4.0000000000
+9.4200000000
+1.1200000000
+1.1200000000
+12.3420000000
+1.1200000000
+1.1220000000
+15376.0000000000
+15650.0000000000
+1575639.9500000000
+9.4200000000
+9.4200000000
+12.5600000000
+1.0000000000
+1524157875171467887.5019052100
+1524157875171467876.3907942000
 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
@@ -989,40 +989,40 @@ POSTHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
 PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
@@ -1055,30 +1055,30 @@ POSTHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1
-1
-1
-1
-1
-1
-1
-1
-1
+-1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
 1.046666666666666666667
-1.12
-1.12
-0.102
-1.12
-1.122
-1
-1.0016
+1.120000000000000000000
+1.120000000000000000000
+0.102000000000000000000
+1.120000000000000000000
+1.122000000000000000000
+1.000000000000000000000
+1.001600000000000000000
 1.000390438247011952191
 1.046666666666666666667
 1.046666666666666666667
-0.785
-1
-1.0000000001
-1.00000000009999999271
+0.785000000000000000000
+1.000000000000000000000
+1.000000000100000000000
+1.000000000099999992710
 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
@@ -1233,44 +1233,44 @@ POSTHOOK: query: SELECT abs(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
-0.3
-0.33
-0.333
-1
-2
-3.14
-1.12
-1.12
-1.122
-1.12
-1.122
-124
-125.2
-1255.49
-3.14
-3.14
-3.14
-1
-1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- avg
 EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
@@ -1359,23 +1359,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789	-1234567890.123456789	-1234567890.123456789
--1255	-1255.49	-1255.49	-1255.49
--11	-1.122	-1.122	-1.122
--1	-1.12	-1.12	-2.24
-0	0.02538461538461538461538	0.02538461538462	0.33
-1	1.0484	1.0484	5.242
-2	2	2	4
-3	3.14	3.14	9.42
-4	3.14	3.14	3.14
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125	125.2	125.2	125.2
-200	200	200	200
-4400	-4400	-4400	-4400
-1234567890	1234567890.12345678	1234567890.12345678	1234567890.12345678
+-1234567890	-1234567890.12345678900000000000000	-1234567890.12345678900000	-1234567890.1234567890
+-1255	-1255.49000000000000000000000	-1255.49000000000000	-1255.4900000000
+-11	-1.12200000000000000000000	-1.12200000000000	-1.1220000000
+-1	-1.12000000000000000000000	-1.12000000000000	-2.2400000000
+0	0.02538461538461538461538	0.02538461538462	0.3300000000
+1	1.04840000000000000000000	1.04840000000000	5.2420000000
+2	2.00000000000000000000000	2.00000000000000	4.0000000000
+3	3.14000000000000000000000	3.14000000000000	9.4200000000
+4	3.14000000000000000000000	3.14000000000000	3.1400000000
+10	10.00000000000000000000000	10.00000000000000	10.0000000000
+20	20.00000000000000000000000	20.00000000000000	20.0000000000
+100	100.00000000000000000000000	100.00000000000000	100.0000000000
+124	124.00000000000000000000000	124.00000000000000	124.0000000000
+125	125.20000000000000000000000	125.20000000000000	125.2000000000
+200	200.00000000000000000000000	200.00000000000000	200.0000000000
+4400	-4400.00000000000000000000000	-4400.00000000000000	-4400.0000000000
+1234567890	1234567890.12345678000000000000000	1234567890.12345678000000	1234567890.1234567800
 PREHOOK: query: -- negative
 EXPLAIN SELECT -key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1407,44 +1407,44 @@ POSTHOOK: query: SELECT -key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
--100
--10
--1
--0.1
--0.01
--200
--20
--2
-0
--0.2
--0.02
--0.3
--0.33
--0.333
-0.3
-0.33
-0.333
--1
--2
--3.14
-1.12
-1.12
-1.122
--1.12
--1.122
--124
--125.2
-1255.49
--3.14
--3.14
--3.14
--1
-1234567890.123456789
--1234567890.12345678
+0.0000000000
+0.0000000000
+-100.0000000000
+-10.0000000000
+-1.0000000000
+-0.1000000000
+-0.0100000000
+-200.0000000000
+-20.0000000000
+-2.0000000000
+0.0000000000
+-0.2000000000
+-0.0200000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+-1.0000000000
+-2.0000000000
+-3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+-1.1200000000
+-1.1220000000
+-124.0000000000
+-125.2000000000
+1255.4900000000
+-3.1400000000
+-3.1400000000
+-3.1400000000
+-1.0000000000
+1234567890.1234567890
+-1234567890.1234567800
 PREHOOK: query: -- positive
 EXPLAIN SELECT +key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1476,44 +1476,44 @@ POSTHOOK: query: SELECT +key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-1
-2
-3.14
--1.12
--1.12
--1.122
-1.12
-1.122
-124
-125.2
--1255.49
-3.14
-3.14
-3.14
-1
--1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+-1.1200000000
+-1.1200000000
+-1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+-1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+-1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- ceiling
 EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1683,42 +1683,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.00
 NULL
-0
-0
-100
-10
-1
-0.1
+0.00
+0.00
+100.00
+10.00
+1.00
+0.10
 0.01
-200
-20
-2
-0
-0.2
+200.00
+20.00
+2.00
+0.00
+0.20
 0.02
-0.3
+0.30
 0.33
 0.33
--0.3
+-0.30
 -0.33
 -0.33
-1
-2
+1.00
+2.00
 3.14
 -1.12
 -1.12
 -1.12
 1.12
 1.12
-124
-125.2
+124.00
+125.20
 -1255.49
 3.14
 3.14
 3.14
-1
+1.00
 -1234567890.12
 1234567890.12
 PREHOOK: query: -- power
@@ -1821,44 +1821,44 @@ POSTHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--2199
+-2199.000000000000
 NULL
 NULL
 NULL
-1
-1
-0
-0
-0
-1
-1
-0
+1.000000000000
+1.000000000000
+0.000000000000
+0.000000000000
+0.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
 NULL
-0
-0
-0.1
-0.01
-0.001
-0.1
-0.01
-0.001
-0
-0
-1
--0.12
--0.12
--0.122
-0.44
-0.439
-1
-1
--626.745
-1
-1
-1
-0
--617283944.0617283945
-1
+0.000000000000
+0.000000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.000000000000
+0.000000000000
+1.000000000000
+-0.120000000000
+-0.120000000000
+-0.122000000000
+0.440000000000
+0.439000000000
+1.000000000000
+1.000000000000
+-626.745000000000
+1.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
+-617283944.061728394500
+1.000000000000
 PREHOOK: query: -- stddev, var
 EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value
 PREHOOK: type: QUERY
@@ -2134,7 +2134,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890.123456789
+-1234567890.1234567890
 PREHOOK: query: -- max
 EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2193,7 +2193,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1234567890.12345678
+1234567890.1234567800
 PREHOOK: query: -- count
 EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY

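Every hunk in decimal_udf.q.out above reflects the same rule: a decimal value is now printed with exactly as many fractional digits as the scale of its result type, padded with trailing zeros where the old output stopped at the last significant digit. A minimal sketch of the effect, assuming a DECIMAL(20,10) key column like the one DECIMAL_UDF appears to use (table name and values here are hypothetical, not part of this diff):

-- minimal sketch; pad_demo is a hypothetical table, not part of this diff
CREATE TABLE pad_demo (key DECIMAL(20,10));
INSERT INTO TABLE pad_demo VALUES (-4400), (3.14);
SELECT -key FROM pad_demo;
-- before HIVE-12063: 4400 and -3.14
-- after HIVE-12063:  4400.0000000000 and -3.1400000000

Note that derived expressions pad to the scale of the result type, not of the source column: sum(key) / count(key) above carries the wider scale of a decimal division, which is why it prints more fractional zeros than avg(key) or sum(key).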
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
index 1f613c4..f7a9853 100644
--- a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
+++ b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
@@ -31,11 +31,11 @@ POSTHOOK: query: select * from sample_06 where gpa = 3.00
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@sample_06
 #### A masked pattern was here ####
-aaa	35	3
-bbb	32	3
-ccc	32	3
-ddd	35	3
-eee	32	3
+aaa	35	3.00
+bbb	32	3.00
+ccc	32	3.00
+ddd	35	3.00
+eee	32	3.00
 PREHOOK: query: create table tab1 (name varchar(50), age int, gpa decimal(3, 2))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -63,8 +63,8 @@ POSTHOOK: query: select * from tab1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab1
 #### A masked pattern was here ####
-aaa	35	3
-bbb	32	3
-ccc	32	3
-ddd	35	3
-eee	32	3
+aaa	35	3.00
+bbb	32	3.00
+ccc	32	3.00
+ddd	35	3.00
+eee	32	3.00

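The gpa column is declared decimal(3, 2) in this very file (see the create table tab1 statement above), so a stored 3 now prints at full scale. A minimal sketch (hypothetical table, not part of this diff):

-- minimal sketch; gpa_demo is a hypothetical table, not part of this diff
CREATE TABLE gpa_demo (gpa DECIMAL(3,2));
INSERT INTO TABLE gpa_demo VALUES (3);
SELECT gpa FROM gpa_demo;  -- was: 3   now: 3.00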
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
index 1586f8a..36a032a 100644
--- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
@@ -1320,105 +1320,105 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_mapjoin
 #### A masked pattern was here ####
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	6984454.211097692
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	6984454.21109769200000
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1508,105 +1508,105 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_mapjoin
 #### A masked pattern was here ####
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	6984454.211097692
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	6984454.21109769200000
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
 PREHOOK: query: DROP TABLE decimal_mapjoin
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_mapjoin

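The two decimal columns of decimal_mapjoin pad to different widths, 10 and 14 fractional digits, consistent with cdecimal1 and cdecimal2 presumably being declared with scales 10 and 14; NULLs are untouched. A minimal sketch under that assumption (hypothetical table, not part of this diff):

-- minimal sketch; two_scales is a hypothetical table, not part of this diff
CREATE TABLE two_scales (c1 DECIMAL(20,10), c2 DECIMAL(23,14));
INSERT INTO TABLE two_scales VALUES (-515.621072973, -617.5607769230769);
SELECT c1, c2 FROM two_scales;
-- was: -515.621072973   -617.5607769230769
-- now: -515.6210729730  -617.56077692307690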
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
index 98d9ceb..4c8b295 100644
--- a/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
@@ -169,112 +169,112 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -285,109 +285,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

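Only the left column of this join output gains the .00 padding, which implies t1.dec and t2.dec are declared with different scales (presumably something like DECIMAL(4,2) versus DECIMAL(4,0)): values that compare equal in the join can still render differently. A minimal sketch of that asymmetry (hypothetical casts, not part of this diff):

-- minimal sketch, not part of this diff
SELECT CAST(14 AS DECIMAL(4,2)), CAST(14 AS DECIMAL(4,0));
-- was: 14     14
-- now: 14.00  14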
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/orc_file_dump.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_file_dump.q.out b/ql/src/test/results/clientpositive/orc_file_dump.q.out
index c494d47..50d5701 100644
--- a/ql/src/test/results/clientpositive/orc_file_dump.q.out
+++ b/ql/src/test/results/clientpositive/orc_file_dump.q.out
@@ -196,7 +196,7 @@ File length: 33458 bytes
 Padding length: 0 bytes
 Padding ratio: 0%
 -- END ORC FILE DUMP --
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: alter table orc_ppd set tblproperties("orc.bloom.filter.fpp"="0.01")
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@orc_ppd
@@ -314,7 +314,7 @@ File length: 38613 bytes
 Padding length: 0 bytes
 Padding ratio: 0%
 -- END ORC FILE DUMP --
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: CREATE TABLE orc_ppd_part(t tinyint,
            si smallint,
            i int,
@@ -444,4 +444,4 @@ File length: 33458 bytes
 Padding length: 0 bytes
 Padding ratio: 0%
 -- END ORC FILE DUMP --
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty	2015	10
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty	2015	10

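The 45.4 in this ORC dump row becomes 45.40, i.e. the decimal column in orc_ppd appears to have scale 2; the same row recurs in orc_predicate_pushdown.q.out below. An equivalent hypothetical one-liner (not part of this diff):

-- minimal sketch, not part of this diff
SELECT CAST(45.4 AS DECIMAL(4,2));  -- was: 45.4   now: 45.40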
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
index 0d4cd15..6a528dd 100644
--- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
@@ -251,7 +251,7 @@ POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_pred
 #### A masked pattern was here ####
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_pred
@@ -260,7 +260,7 @@ POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_pred
 #### A masked pattern was here ####
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred
   WHERE t IS NOT NULL
   AND t < 0

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/parquet_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_decimal.q.out b/ql/src/test/results/clientpositive/parquet_decimal.q.out
index 493bd4f..a19cd6d 100644
--- a/ql/src/test/results/clientpositive/parquet_decimal.q.out
+++ b/ql/src/test/results/clientpositive/parquet_decimal.q.out
@@ -63,9 +63,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19
-Beck	0
-Beck	79.9
+Tom	19.00
+Beck	0.00
+Beck	79.90
 PREHOOK: query: SELECT value, count(*) FROM parq_dec GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parq_dec
@@ -75,14 +75,14 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parq_dec
 #### A masked pattern was here ####
 -12.25	1
-0	1
+0.00	1
 4.33	1
 5.96	1
-19	1
+19.00	1
 33.33	1
 55.71	1
 77.34	1
-79.9	1
+79.90	1
 234.79	1
 PREHOOK: query: TRUNCATE TABLE parq_dec
 PREHOOK: type: TRUNCATETABLE
@@ -158,12 +158,12 @@ POSTHOOK: Input: default@parq_dec1
 77.3
 55.7
 4.3
-6
+6.0
 12.3
 33.3
 0.2
 3.2
-8
+8.0
 PREHOOK: query: DROP TABLE dec
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@dec

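parq_dec and parq_dec1 pad to two and one fractional digits respectively, so the padding width tracks each table's declared scale rather than any global setting. A minimal sketch (hypothetical casts, not part of this diff):

-- minimal sketch, not part of this diff
SELECT CAST(19 AS DECIMAL(5,2)), CAST(6 AS DECIMAL(3,1));
-- was: 19     6
-- now: 19.00  6.0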
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out b/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
index 1355849..6e62ee4 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
@@ -26,11 +26,11 @@ POSTHOOK: query: select * from newtypestbl where b=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b!=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -39,11 +39,11 @@ POSTHOOK: query: select * from newtypestbl where b!=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b<true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -52,11 +52,11 @@ POSTHOOK: query: select * from newtypestbl where b<true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b>true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -73,16 +73,16 @@ POSTHOOK: query: select * from newtypestbl where b<=true sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -91,11 +91,11 @@ POSTHOOK: query: select * from newtypestbl where b=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b!=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -104,11 +104,11 @@ POSTHOOK: query: select * from newtypestbl where b!=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b<false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -125,11 +125,11 @@ POSTHOOK: query: select * from newtypestbl where b>false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b<=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -138,11 +138,11 @@ POSTHOOK: query: select * from newtypestbl where b<=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -151,11 +151,11 @@ POSTHOOK: query: select * from newtypestbl where b=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b!=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -164,11 +164,11 @@ POSTHOOK: query: select * from newtypestbl where b!=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b<true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -177,11 +177,11 @@ POSTHOOK: query: select * from newtypestbl where b<true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b>true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -198,16 +198,16 @@ POSTHOOK: query: select * from newtypestbl where b<=true sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -216,11 +216,11 @@ POSTHOOK: query: select * from newtypestbl where b=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b!=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -229,11 +229,11 @@ POSTHOOK: query: select * from newtypestbl where b!=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b<false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -250,11 +250,11 @@ POSTHOOK: query: select * from newtypestbl where b>false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b<=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -263,8 +263,8 @@ POSTHOOK: query: select * from newtypestbl where b<=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false

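All of the newtypestbl rows pad the decimal column to three fractional digits (0.22 becomes 0.220, 11.22 becomes 11.220), consistent with a scale-3 declaration such as DECIMAL(5,3); the parquet_ppd_char and parquet_ppd_date hunks below repeat the same change on the same data. A minimal sketch (hypothetical casts, not part of this diff):

-- minimal sketch, not part of this diff
SELECT CAST(0.22 AS DECIMAL(5,3)), CAST(11.22 AS DECIMAL(5,3));
-- was: 0.22   11.22
-- now: 0.220  11.220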
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
index f224870..defaa9d 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where c="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where c="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c!="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where c!="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c!="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where c!="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c<"hello"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where c<"hello"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c<"hello"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where c<"hello"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c<="hello" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,16 +106,16 @@ POSTHOOK: query: select * from newtypestbl where c<="hello" sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c<="hello" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -124,16 +124,16 @@ POSTHOOK: query: select * from newtypestbl where c<="hello" sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c="apple "
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,11 +158,11 @@ POSTHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -171,11 +171,11 @@ POSTHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -184,16 +184,16 @@ POSTHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -202,16 +202,16 @@ POSTHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c in ("carrot")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,11 +236,11 @@ POSTHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -249,11 +249,11 @@ POSTHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c between "apple" and "zombie" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -262,16 +262,16 @@ POSTHOOK: query: select * from newtypestbl where c between "apple" and "zombie"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c between "apple" and "zombie" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -280,16 +280,16 @@ POSTHOOK: query: select * from newtypestbl where c between "apple" and "zombie"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
index e599014..55231e9 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where da='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where da='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da= date '1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where da= date '1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,11 +106,11 @@ POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da!='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -119,11 +119,11 @@ POSTHOOK: query: select * from newtypestbl where da!='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da!='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -132,11 +132,11 @@ POSTHOOK: query: select * from newtypestbl where da!='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da<'1970-02-27'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -145,11 +145,11 @@ POSTHOOK: query: select * from newtypestbl where da<'1970-02-27'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da<'1970-02-27'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,11 +158,11 @@ POSTHOOK: query: select * from newtypestbl where da<'1970-02-27'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -171,16 +171,16 @@ POSTHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -189,16 +189,16 @@ POSTHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da<'1970-02-15'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -223,11 +223,11 @@ POSTHOOK: query: select * from newtypestbl where da<='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da<='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,11 +236,11 @@ POSTHOOK: query: select * from newtypestbl where da<='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -249,16 +249,16 @@ POSTHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -267,16 +267,16 @@ POSTHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -285,11 +285,11 @@ POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as dat
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -298,11 +298,11 @@ POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as dat
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -311,16 +311,16 @@ POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as dat
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -329,16 +329,16 @@ POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as dat
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -363,11 +363,11 @@ POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '19
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -376,11 +376,11 @@ POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '19
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -389,16 +389,16 @@ POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '19
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -407,16 +407,16 @@ POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '19
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
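
The q.out hunks above all make the same change: decimal values in query output are now padded with trailing zeros out to the declared scale of the column, so the decimal column here (evidently declared with scale 3) prints 0.22 as 0.220 and 11.22 as 11.220. A minimal Java sketch of that formatting rule, assuming only java.math.BigDecimal; it illustrates the behavior shown in the diffs, not Hive's own formatting code:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalPadDemo {
      // Render a decimal at the column's declared scale, padding with
      // trailing zeros rather than printing the minimal representation.
      // UNNECESSARY is safe here: increasing the scale never rounds.
      static String padToScale(BigDecimal value, int columnScale) {
        return value.setScale(columnScale, RoundingMode.UNNECESSARY).toPlainString();
      }

      public static void main(String[] args) {
        System.out.println(padToScale(new BigDecimal("0.22"), 3));   // 0.220
        System.out.println(padToScale(new BigDecimal("11.22"), 3));  // 11.220
      }
    }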


[16/55] [abbrv] hive git commit: HIVE-12252 Streaming API HiveEndPoint can be created w/o partitionVals for partitioned table (Wei Zheng via Eugene Koifman)

Posted by xu...@apache.org.
HIVE-12252 Streaming API HiveEndPoint can be created w/o partitionVals for partitioned table (Wei Zheng via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0918ff95
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0918ff95
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0918ff95

Branch: refs/heads/spark
Commit: 0918ff959e6b0fd67a6b8b478290436af9532a31
Parents: 6d936b5
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Thu Nov 5 10:07:30 2015 -0800
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Thu Nov 5 10:07:30 2015 -0800

----------------------------------------------------------------------
 .../hcatalog/streaming/ConnectionError.java     |  4 ++
 .../hive/hcatalog/streaming/HiveEndPoint.java   | 51 +++++++++++++++-----
 .../hive/hcatalog/streaming/TestStreaming.java  | 35 +++++++++++---
 3 files changed, 71 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
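
In short, newConnection now fails fast with a ConnectionError whenever the supplied partition values disagree with the table's partitioning, in either direction. A sketch of the two rejected combinations from the caller's side, using the HiveEndPoint and newConnection signatures visible in the diff below; the metastore URI and table names are placeholders, and the sketch assumes "partitioned_tbl" is partitioned while "plain_tbl" is not:

    import java.util.Arrays;

    import org.apache.hive.hcatalog.streaming.ConnectionError;
    import org.apache.hive.hcatalog.streaming.HiveEndPoint;

    public class EndPointCheckDemo {
      public static void main(String[] args) throws Exception {
        String metaStoreUri = "thrift://localhost:9083";  // placeholder
        // Partitioned table, no partition values: rejected at connect time.
        try {
          new HiveEndPoint(metaStoreUri, "db", "partitioned_tbl", null)
              .newConnection(true);
        } catch (ConnectionError expected) {
          System.out.println(expected.getMessage());
        }
        // Unpartitioned table, partition values supplied: also rejected.
        try {
          new HiveEndPoint(metaStoreUri, "db", "plain_tbl",
              Arrays.asList("2015", "11")).newConnection(false);
        } catch (ConnectionError expected) {
          System.out.println(expected.getMessage());
        }
      }
    }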


http://git-wip-us.apache.org/repos/asf/hive/blob/0918ff95/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
index 1aeef76..ffa51c9 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
@@ -20,6 +20,10 @@ package org.apache.hive.hcatalog.streaming;
 
 public class ConnectionError extends StreamingException {
 
+  public ConnectionError(String msg) {
+    super(msg);
+  }
+
   public ConnectionError(String msg, Exception innerEx) {
     super(msg, innerEx);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/0918ff95/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 306c93d..2f2d44a 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -279,23 +279,48 @@ public class HiveEndPoint {
       }
     }
 
-    private void checkEndPoint(HiveEndPoint endPoint, IMetaStoreClient msClient) throws InvalidTable {
-      // 1 - check if TBLPROPERTIES ('transactional'='true') is set on table
+    /**
+     * Checks the validity of the endpoint: the table must exist, be marked
+     * transactional, and its partitioning must match the supplied partition values.
+     * @param endPoint the HiveEndPoint to be checked
+     * @param msClient the metastore client
+     * @throws InvalidTable if the table cannot be resolved or is not transactional
+     * @throws ConnectionError if the partition values do not match the table's partitioning
+     */
+    private void checkEndPoint(HiveEndPoint endPoint, IMetaStoreClient msClient)
+        throws InvalidTable, ConnectionError {
+      Table t;
       try {
-        Table t = msClient.getTable(endPoint.database, endPoint.table);
-        Map<String, String> params = t.getParameters();
-        if(params != null) {
-          String transactionalProp = params.get("transactional");
-          if (transactionalProp != null && transactionalProp.equalsIgnoreCase("true")) {
-            return;
-          }
-        }
-        LOG.error("'transactional' property is not set on Table " + endPoint);
-        throw new InvalidTable(endPoint.database, endPoint.table, "\'transactional\' property is not set on Table");
+        t = msClient.getTable(endPoint.database, endPoint.table);
       } catch (Exception e) {
-        LOG.warn("Unable to check if Table is transactional. " + endPoint, e);
+        LOG.warn("Unable to check the endPoint: " + endPoint, e);
         throw new InvalidTable(endPoint.database, endPoint.table, e);
       }
+
+      // 1 - check if TBLPROPERTIES ('transactional'='true') is set on table
+      Map<String, String> params = t.getParameters();
+      if (params != null) {
+        String transactionalProp = params.get("transactional");
+        if (transactionalProp == null || !transactionalProp.equalsIgnoreCase("true")) {
+          LOG.error("'transactional' property is not set on Table " + endPoint);
+          throw new InvalidTable(endPoint.database, endPoint.table, "\'transactional\' property is not set on Table");
+        }
+      }
+
+      // 2 - check if partitionvals are legitimate
+      if (t.getPartitionKeys() != null && !t.getPartitionKeys().isEmpty()
+          && endPoint.partitionVals.isEmpty()) {
+        // Invalid if table is partitioned, but endPoint's partitionVals is empty
+        String errMsg = "HiveEndPoint " + endPoint + " doesn't specify any partitions for " +
+            "partitioned table";
+        LOG.error(errMsg);
+        throw new ConnectionError(errMsg);
+      }
+      if ((t.getPartitionKeys() == null || t.getPartitionKeys().isEmpty())
+          && !endPoint.partitionVals.isEmpty()) {
+        // Invalid if table is not partitioned, but endPoint's partitionVals is not empty
+        String errMsg = "HiveEndPoint" + endPoint + " specifies partitions for unpartitioned table";
+        LOG.error(errMsg);
+        throw new ConnectionError(errMsg);
+      }
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/hive/blob/0918ff95/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index d9a7eae..58cfbaa 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -204,7 +204,7 @@ public class TestStreaming {
 
     dropDB(msClient, dbName2);
     String loc2 = dbFolder.newFolder(dbName2 + ".db").toString();
-    partLoc2 = createDbAndTable(driver, dbName2, tblName2, partitionVals, colNames, colTypes, bucketCols, partNames, loc2, 2);
+    partLoc2 = createDbAndTable(driver, dbName2, tblName2, null, colNames, colTypes, bucketCols, null, loc2, 2);
 
     String loc3 = dbFolder.newFolder("testing5.db").toString();
     createStoreSales("testing5", loc3);
@@ -477,15 +477,38 @@ public class TestStreaming {
 
   @Test
   public void testEndpointConnection() throws Exception {
-    // 1) Basic
-    HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName, tblName
-            , partitionVals);
+    // For partitioned table, partitionVals are specified
+    HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName, tblName, partitionVals);
     StreamingConnection connection = endPt.newConnection(false, null); //shouldn't throw
     connection.close();
 
-    // 2) Leave partition unspecified
-    endPt = new HiveEndPoint(metaStoreURI, dbName, tblName, null);
+    // For unpartitioned table, partitionVals are not specified
+    endPt = new HiveEndPoint(metaStoreURI, dbName2, tblName2, null);
     endPt.newConnection(false, null).close(); // should not throw
+
+    // For partitioned table, partitionVals are not specified
+    try {
+      endPt = new HiveEndPoint(metaStoreURI, dbName, tblName, null);
+      connection = endPt.newConnection(true);
+      Assert.assertTrue("ConnectionError was not thrown", false);
+      connection.close();
+    } catch (ConnectionError e) {
+      // expecting this exception
+      String errMsg = "doesn't specify any partitions for partitioned table";
+      Assert.assertTrue(e.toString().endsWith(errMsg));
+    }
+
+    // For unpartitioned table, partition values are specified
+    try {
+      endPt = new HiveEndPoint(metaStoreURI, dbName2, tblName2, partitionVals);
+      connection = endPt.newConnection(false);
+      Assert.assertTrue("ConnectionError was not thrown", false);
+      connection.close();
+    } catch (ConnectionError e) {
+      // expecting this exception
+      String errMsg = "specifies partitions for unpartitioned table";
+      Assert.assertTrue(e.toString().endsWith(errMsg));
+    }
   }
 
   @Test


[15/55] [abbrv] hive git commit: HIVE-12304 "drop database cascade" needs to unregister functions

Posted by xu...@apache.org.
HIVE-12304 "drop database cascade" needs to unregister functions


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6d936b53
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6d936b53
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6d936b53

Branch: refs/heads/spark
Commit: 6d936b533f6030e401937d4c4dff2feca5990916
Parents: 175087b
Author: aihuaxu <ai...@apache.org>
Authored: Fri Oct 30 13:31:08 2015 -0400
Committer: aihuaxu <ai...@apache.org>
Committed: Thu Nov 5 11:34:40 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  7 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |  9 +++
 .../apache/hadoop/hive/ql/exec/Registry.java    | 12 +++
 .../clientnegative/drop_database_cascade.q      | 26 ++++++
 .../clientnegative/drop_database_cascade.q.out  | 85 ++++++++++++++++++++
 5 files changed, 138 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
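
The heart of the fix is the new Registry.unregisterFunctions below: it collects every registered function whose qualified name matches the pattern "<dbname>\..*" (lower-cased) and unregisters each one. A small self-contained sketch of just that matching step, assuming qualified function names of the form db.func as the test in this patch uses:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.regex.Pattern;

    public class DbFunctionMatchDemo {
      public static void main(String[] args) {
        // Same pattern the patch builds: the lower-cased database name,
        // a literal dot, then any function name.
        String dbName = "TEST_database";
        Pattern p = Pattern.compile(dbName.toLowerCase() + "\\..*");
        List<String> registered =
            Arrays.asList("test_database.test_func", "default.test_func");
        List<String> toUnregister = new ArrayList<String>();
        for (String name : registered) {
          if (p.matcher(name).matches()) {
            toUnregister.add(name);
          }
        }
        System.out.println(toUnregister);  // [test_database.test_func]
      }
    }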


http://git-wip-us.apache.org/repos/asf/hive/blob/6d936b53/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index caf98b5..9ab3e98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -3731,7 +3731,12 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int dropDatabase(Hive db, DropDatabaseDesc dropDb)
       throws HiveException {
     try {
-      db.dropDatabase(dropDb.getDatabaseName(), true, dropDb.getIfExists(), dropDb.isCasdade());
+      String dbName = dropDb.getDatabaseName();
+      db.dropDatabase(dbName, true, dropDb.getIfExists(), dropDb.isCasdade());
+      // Unregister the functions as well
+      if (dropDb.isCasdade()) {
+        FunctionRegistry.unregisterPermanentFunctions(dbName);
+      }
     }
     catch (NoSuchObjectException ex) {
       throw new HiveException(ex, ErrorMsg.DATABASE_NOT_EXISTS, dropDb.getDatabaseName());

http://git-wip-us.apache.org/repos/asf/hive/blob/6d936b53/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index de8e98c..2196ca9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -1571,6 +1571,15 @@ public final class FunctionRegistry {
     unregisterTemporaryUDF(functionName);
   }
 
+  /**
+   * Unregisters all the functions under the database dbName
+   * @param dbName specified database name
+   * @throws HiveException
+   */
+  public static void unregisterPermanentFunctions(String dbName) throws HiveException {
+    system.unregisterFunctions(dbName);
+  }
+
   private FunctionRegistry() {
     // prevent instantiation
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/6d936b53/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
index 1121819..ea9813c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
@@ -419,6 +419,18 @@ public class Registry {
     }
   }
 
+  /**
+   * Unregisters all the functions belonging to the specified database
+   * @param dbName database name
+   * @throws HiveException
+   */
+  public synchronized void unregisterFunctions(String dbName) throws HiveException {
+    Set<String> funcNames = getFunctionNames(dbName.toLowerCase() + "\\..*");
+    for (String funcName : funcNames) {
+      unregisterFunction(funcName);
+    }
+  }
+
   public GenericUDAFResolver getGenericUDAFResolver(String functionName) throws SemanticException {
     FunctionInfo info = getFunctionInfo(functionName);
     if (info != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/6d936b53/ql/src/test/queries/clientnegative/drop_database_cascade.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/drop_database_cascade.q b/ql/src/test/queries/clientnegative/drop_database_cascade.q
new file mode 100644
index 0000000..d544692
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/drop_database_cascade.q
@@ -0,0 +1,26 @@
+-- This test verifies that the functions and tables are unregistered when the database is dropped
+-- and that other databases are not affected
+
+CREATE DATABASE TEST_database;
+
+USE TEST_database;
+
+CREATE TABLE test_table (key STRING, value STRING);
+
+CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
+
+USE default;
+
+CREATE TABLE test_table (key STRING, value STRING);
+
+CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
+
+DROP DATABASE TEST_database CASCADE;
+
+describe test_table;
+
+describe function test_func;
+
+describe function TEST_database.test_func;
+
+describe TEST_database.test_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/6d936b53/ql/src/test/results/clientnegative/drop_database_cascade.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/drop_database_cascade.q.out b/ql/src/test/results/clientnegative/drop_database_cascade.q.out
new file mode 100644
index 0000000..304b967
--- /dev/null
+++ b/ql/src/test/results/clientnegative/drop_database_cascade.q.out
@@ -0,0 +1,85 @@
+PREHOOK: query: -- This test verifies that the functions and tables are unregistered when the database is dropped
+-- and that other databases are not affected
+
+CREATE DATABASE TEST_database
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:TEST_database
+POSTHOOK: query: -- This test verifies that the functions and tables are unregistered when the database is dropped
+-- and that other databases are not affected
+
+CREATE DATABASE TEST_database
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:TEST_database
+PREHOOK: query: USE TEST_database
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:test_database
+POSTHOOK: query: USE TEST_database
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:test_database
+PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: TEST_database@test_table
+PREHOOK: Output: database:test_database
+POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: TEST_database@test_table
+POSTHOOK: Output: database:test_database
+PREHOOK: query: CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: database:test_database
+PREHOOK: Output: test_database.test_func
+POSTHOOK: query: CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: database:test_database
+POSTHOOK: Output: test_database.test_func
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table
+POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table
+PREHOOK: query: CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: database:default
+PREHOOK: Output: default.test_func
+POSTHOOK: query: CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default.test_func
+PREHOOK: query: DROP DATABASE TEST_database CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:test_database
+PREHOOK: Output: database:test_database
+PREHOOK: Output: test_database@test_table
+POSTHOOK: query: DROP DATABASE TEST_database CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:test_database
+POSTHOOK: Output: database:test_database
+POSTHOOK: Output: test_database@test_table
+PREHOOK: query: describe test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: describe test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+key                 	string              	                    
+value               	string              	                    
+PREHOOK: query: describe function test_func
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: describe function test_func
+POSTHOOK: type: DESCFUNCTION
+test_func(str) - returns the numeric value of the first character of str
+PREHOOK: query: describe function TEST_database.test_func
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: describe function TEST_database.test_func
+POSTHOOK: type: DESCFUNCTION
+Function 'TEST_database.test_func' does not exist.
+FAILED: SemanticException [Error 10001]: Table not found TEST_database.test_table


[44/55] [abbrv] hive git commit: HIVE-7575 GetTables thrift call is very slow (Navis via Aihua Xu, reviewed by Szehon Ho, Aihua Xu)

Posted by xu...@apache.org.
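
The rest of this message is the regenerated C++ Thrift code for the new get_table_meta call, which returns the name, type, and optional comment of every table matching a database pattern and a table pattern in a single round trip, instead of one get_table call per table. A hedged sketch of the equivalent generated Java client call, assuming the standard Thrift Java bindings regenerated from the same IDL change; the host, port, patterns, and table-type filter are placeholders:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.TableMeta;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    public class GetTableMetaDemo {
      public static void main(String[] args) throws Exception {
        TSocket transport = new TSocket("localhost", 9083);  // placeholder
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // One round trip covers all matching tables.
        List<TableMeta> metas =
            client.get_table_meta("*", "*", Arrays.asList("MANAGED_TABLE"));
        for (TableMeta m : metas) {
          System.out.println(m.getDbName() + "." + m.getTableName()
              + " (" + m.getTableType() + ")");
        }
        transport.close();
      }
    }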
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 3d7cb18..cea9000 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -43,6 +43,7 @@ class ThriftHiveMetastoreIf : virtual public  ::facebook::fb303::FacebookService
   virtual void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) = 0;
   virtual void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) = 0;
   virtual void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern) = 0;
+  virtual void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types) = 0;
   virtual void get_all_tables(std::vector<std::string> & _return, const std::string& db_name) = 0;
   virtual void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) = 0;
   virtual void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) = 0;
@@ -246,6 +247,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void get_tables(std::vector<std::string> & /* _return */, const std::string& /* db_name */, const std::string& /* pattern */) {
     return;
   }
+  void get_table_meta(std::vector<TableMeta> & /* _return */, const std::string& /* db_patterns */, const std::string& /* tbl_patterns */, const std::vector<std::string> & /* tbl_types */) {
+    return;
+  }
   void get_all_tables(std::vector<std::string> & /* _return */, const std::string& /* db_name */) {
     return;
   }
@@ -3199,6 +3203,132 @@ class ThriftHiveMetastore_get_tables_presult {
 
 };
 
+typedef struct _ThriftHiveMetastore_get_table_meta_args__isset {
+  _ThriftHiveMetastore_get_table_meta_args__isset() : db_patterns(false), tbl_patterns(false), tbl_types(false) {}
+  bool db_patterns :1;
+  bool tbl_patterns :1;
+  bool tbl_types :1;
+} _ThriftHiveMetastore_get_table_meta_args__isset;
+
+class ThriftHiveMetastore_get_table_meta_args {
+ public:
+
+  ThriftHiveMetastore_get_table_meta_args(const ThriftHiveMetastore_get_table_meta_args&);
+  ThriftHiveMetastore_get_table_meta_args& operator=(const ThriftHiveMetastore_get_table_meta_args&);
+  ThriftHiveMetastore_get_table_meta_args() : db_patterns(), tbl_patterns() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_table_meta_args() throw();
+  std::string db_patterns;
+  std::string tbl_patterns;
+  std::vector<std::string>  tbl_types;
+
+  _ThriftHiveMetastore_get_table_meta_args__isset __isset;
+
+  void __set_db_patterns(const std::string& val);
+
+  void __set_tbl_patterns(const std::string& val);
+
+  void __set_tbl_types(const std::vector<std::string> & val);
+
+  bool operator == (const ThriftHiveMetastore_get_table_meta_args & rhs) const
+  {
+    if (!(db_patterns == rhs.db_patterns))
+      return false;
+    if (!(tbl_patterns == rhs.tbl_patterns))
+      return false;
+    if (!(tbl_types == rhs.tbl_types))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_table_meta_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_table_meta_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_table_meta_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_table_meta_pargs() throw();
+  const std::string* db_patterns;
+  const std::string* tbl_patterns;
+  const std::vector<std::string> * tbl_types;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_table_meta_result__isset {
+  _ThriftHiveMetastore_get_table_meta_result__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_get_table_meta_result__isset;
+
+class ThriftHiveMetastore_get_table_meta_result {
+ public:
+
+  ThriftHiveMetastore_get_table_meta_result(const ThriftHiveMetastore_get_table_meta_result&);
+  ThriftHiveMetastore_get_table_meta_result& operator=(const ThriftHiveMetastore_get_table_meta_result&);
+  ThriftHiveMetastore_get_table_meta_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_table_meta_result() throw();
+  std::vector<TableMeta>  success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_get_table_meta_result__isset __isset;
+
+  void __set_success(const std::vector<TableMeta> & val);
+
+  void __set_o1(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_table_meta_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_table_meta_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_table_meta_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_table_meta_presult__isset {
+  _ThriftHiveMetastore_get_table_meta_presult__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_get_table_meta_presult__isset;
+
+class ThriftHiveMetastore_get_table_meta_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_table_meta_presult() throw();
+  std::vector<TableMeta> * success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_get_table_meta_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
 typedef struct _ThriftHiveMetastore_get_all_tables_args__isset {
   _ThriftHiveMetastore_get_all_tables_args__isset() : db_name(false) {}
   bool db_name :1;
@@ -16967,6 +17097,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern);
   void send_get_tables(const std::string& db_name, const std::string& pattern);
   void recv_get_tables(std::vector<std::string> & _return);
+  void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
+  void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
+  void recv_get_table_meta(std::vector<TableMeta> & _return);
   void get_all_tables(std::vector<std::string> & _return, const std::string& db_name);
   void send_get_all_tables(const std::string& db_name);
   void recv_get_all_tables(std::vector<std::string> & _return);
@@ -17325,6 +17458,7 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
   void process_drop_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_drop_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_get_table_meta(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_all_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_table_objects_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@@ -17459,6 +17593,7 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
     processMap_["drop_table"] = &ThriftHiveMetastoreProcessor::process_drop_table;
     processMap_["drop_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_drop_table_with_environment_context;
     processMap_["get_tables"] = &ThriftHiveMetastoreProcessor::process_get_tables;
+    processMap_["get_table_meta"] = &ThriftHiveMetastoreProcessor::process_get_table_meta;
     processMap_["get_all_tables"] = &ThriftHiveMetastoreProcessor::process_get_all_tables;
     processMap_["get_table"] = &ThriftHiveMetastoreProcessor::process_get_table;
     processMap_["get_table_objects_by_name"] = &ThriftHiveMetastoreProcessor::process_get_table_objects_by_name;
@@ -17801,6 +17936,16 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     return;
   }
 
+  void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_table_meta(_return, db_patterns, tbl_patterns, tbl_types);
+    }
+    ifaces_[i]->get_table_meta(_return, db_patterns, tbl_patterns, tbl_types);
+    return;
+  }
+
   void get_all_tables(std::vector<std::string> & _return, const std::string& db_name) {
     size_t sz = ifaces_.size();
     size_t i = 0;
@@ -18929,6 +19074,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern);
   int32_t send_get_tables(const std::string& db_name, const std::string& pattern);
   void recv_get_tables(std::vector<std::string> & _return, const int32_t seqid);
+  void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
+  int32_t send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
+  void recv_get_table_meta(std::vector<TableMeta> & _return, const int32_t seqid);
   void get_all_tables(std::vector<std::string> & _return, const std::string& db_name);
   int32_t send_get_all_tables(const std::string& db_name);
   void recv_get_all_tables(std::vector<std::string> & _return, const int32_t seqid);

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index a395729..c0d9401 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -127,6 +127,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("get_tables\n");
   }
 
+  void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types) {
+    // Your implementation goes here
+    printf("get_table_meta\n");
+  }
+
   void get_all_tables(std::vector<std::string> & _return, const std::string& db_name) {
     // Your implementation goes here
     printf("get_all_tables\n");

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 5fd4a90..ee28d0d 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -15157,6 +15157,163 @@ void GetAllFunctionsResponse::printTo(std::ostream& out) const {
 }
 
 
+TableMeta::~TableMeta() throw() {
+}
+
+
+void TableMeta::__set_dbName(const std::string& val) {
+  this->dbName = val;
+}
+
+void TableMeta::__set_tableName(const std::string& val) {
+  this->tableName = val;
+}
+
+void TableMeta::__set_tableType(const std::string& val) {
+  this->tableType = val;
+}
+
+void TableMeta::__set_comments(const std::string& val) {
+  this->comments = val;
+__isset.comments = true;
+}
+
+uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+  bool isset_dbName = false;
+  bool isset_tableName = false;
+  bool isset_tableType = false;
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dbName);
+          isset_dbName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->tableName);
+          isset_tableName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->tableType);
+          isset_tableType = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->comments);
+          this->__isset.comments = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  if (!isset_dbName)
+    throw TProtocolException(TProtocolException::INVALID_DATA);
+  if (!isset_tableName)
+    throw TProtocolException(TProtocolException::INVALID_DATA);
+  if (!isset_tableType)
+    throw TProtocolException(TProtocolException::INVALID_DATA);
+  return xfer;
+}
+
+uint32_t TableMeta::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("TableMeta");
+
+  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->dbName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->tableName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->tableType);
+  xfer += oprot->writeFieldEnd();
+
+  if (this->__isset.comments) {
+    xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 4);
+    xfer += oprot->writeString(this->comments);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(TableMeta &a, TableMeta &b) {
+  using ::std::swap;
+  swap(a.dbName, b.dbName);
+  swap(a.tableName, b.tableName);
+  swap(a.tableType, b.tableType);
+  swap(a.comments, b.comments);
+  swap(a.__isset, b.__isset);
+}
+
+TableMeta::TableMeta(const TableMeta& other682) {
+  dbName = other682.dbName;
+  tableName = other682.tableName;
+  tableType = other682.tableType;
+  comments = other682.comments;
+  __isset = other682.__isset;
+}
+TableMeta& TableMeta::operator=(const TableMeta& other683) {
+  dbName = other683.dbName;
+  tableName = other683.tableName;
+  tableType = other683.tableType;
+  comments = other683.comments;
+  __isset = other683.__isset;
+  return *this;
+}
+void TableMeta::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "TableMeta(";
+  out << "dbName=" << to_string(dbName);
+  out << ", " << "tableName=" << to_string(tableName);
+  out << ", " << "tableType=" << to_string(tableType);
+  out << ", " << "comments="; (__isset.comments ? (out << to_string(comments)) : (out << "<null>"));
+  out << ")";
+}
+
+
 MetaException::~MetaException() throw() {
 }
 
@@ -15226,13 +15383,13 @@ void swap(MetaException &a, MetaException &b) {
   swap(a.__isset, b.__isset);
 }
 
-MetaException::MetaException(const MetaException& other682) : TException() {
-  message = other682.message;
-  __isset = other682.__isset;
+MetaException::MetaException(const MetaException& other684) : TException() {
+  message = other684.message;
+  __isset = other684.__isset;
 }
-MetaException& MetaException::operator=(const MetaException& other683) {
-  message = other683.message;
-  __isset = other683.__isset;
+MetaException& MetaException::operator=(const MetaException& other685) {
+  message = other685.message;
+  __isset = other685.__isset;
   return *this;
 }
 void MetaException::printTo(std::ostream& out) const {
@@ -15323,13 +15480,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) {
   swap(a.__isset, b.__isset);
 }
 
-UnknownTableException::UnknownTableException(const UnknownTableException& other684) : TException() {
-  message = other684.message;
-  __isset = other684.__isset;
+UnknownTableException::UnknownTableException(const UnknownTableException& other686) : TException() {
+  message = other686.message;
+  __isset = other686.__isset;
 }
-UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other685) {
-  message = other685.message;
-  __isset = other685.__isset;
+UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other687) {
+  message = other687.message;
+  __isset = other687.__isset;
   return *this;
 }
 void UnknownTableException::printTo(std::ostream& out) const {
@@ -15420,13 +15577,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) {
   swap(a.__isset, b.__isset);
 }
 
-UnknownDBException::UnknownDBException(const UnknownDBException& other686) : TException() {
-  message = other686.message;
-  __isset = other686.__isset;
+UnknownDBException::UnknownDBException(const UnknownDBException& other688) : TException() {
+  message = other688.message;
+  __isset = other688.__isset;
 }
-UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other687) {
-  message = other687.message;
-  __isset = other687.__isset;
+UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other689) {
+  message = other689.message;
+  __isset = other689.__isset;
   return *this;
 }
 void UnknownDBException::printTo(std::ostream& out) const {
@@ -15517,13 +15674,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) {
   swap(a.__isset, b.__isset);
 }
 
-AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other688) : TException() {
-  message = other688.message;
-  __isset = other688.__isset;
+AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other690) : TException() {
+  message = other690.message;
+  __isset = other690.__isset;
 }
-AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other689) {
-  message = other689.message;
-  __isset = other689.__isset;
+AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other691) {
+  message = other691.message;
+  __isset = other691.__isset;
   return *this;
 }
 void AlreadyExistsException::printTo(std::ostream& out) const {
@@ -15614,13 +15771,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) {
   swap(a.__isset, b.__isset);
 }
 
-InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other690) : TException() {
-  message = other690.message;
-  __isset = other690.__isset;
+InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other692) : TException() {
+  message = other692.message;
+  __isset = other692.__isset;
 }
-InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other691) {
-  message = other691.message;
-  __isset = other691.__isset;
+InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other693) {
+  message = other693.message;
+  __isset = other693.__isset;
   return *this;
 }
 void InvalidPartitionException::printTo(std::ostream& out) const {
@@ -15711,13 +15868,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) {
   swap(a.__isset, b.__isset);
 }
 
-UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other692) : TException() {
-  message = other692.message;
-  __isset = other692.__isset;
+UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other694) : TException() {
+  message = other694.message;
+  __isset = other694.__isset;
 }
-UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other693) {
-  message = other693.message;
-  __isset = other693.__isset;
+UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other695) {
+  message = other695.message;
+  __isset = other695.__isset;
   return *this;
 }
 void UnknownPartitionException::printTo(std::ostream& out) const {
@@ -15808,13 +15965,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) {
   swap(a.__isset, b.__isset);
 }
 
-InvalidObjectException::InvalidObjectException(const InvalidObjectException& other694) : TException() {
-  message = other694.message;
-  __isset = other694.__isset;
+InvalidObjectException::InvalidObjectException(const InvalidObjectException& other696) : TException() {
+  message = other696.message;
+  __isset = other696.__isset;
 }
-InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other695) {
-  message = other695.message;
-  __isset = other695.__isset;
+InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other697) {
+  message = other697.message;
+  __isset = other697.__isset;
   return *this;
 }
 void InvalidObjectException::printTo(std::ostream& out) const {
@@ -15905,13 +16062,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) {
   swap(a.__isset, b.__isset);
 }
 
-NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other696) : TException() {
-  message = other696.message;
-  __isset = other696.__isset;
+NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other698) : TException() {
+  message = other698.message;
+  __isset = other698.__isset;
 }
-NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other697) {
-  message = other697.message;
-  __isset = other697.__isset;
+NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other699) {
+  message = other699.message;
+  __isset = other699.__isset;
   return *this;
 }
 void NoSuchObjectException::printTo(std::ostream& out) const {
@@ -16002,13 +16159,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) {
   swap(a.__isset, b.__isset);
 }
 
-IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other698) : TException() {
-  message = other698.message;
-  __isset = other698.__isset;
+IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other700) : TException() {
+  message = other700.message;
+  __isset = other700.__isset;
 }
-IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other699) {
-  message = other699.message;
-  __isset = other699.__isset;
+IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other701) {
+  message = other701.message;
+  __isset = other701.__isset;
   return *this;
 }
 void IndexAlreadyExistsException::printTo(std::ostream& out) const {
@@ -16099,13 +16256,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) {
   swap(a.__isset, b.__isset);
 }
 
-InvalidOperationException::InvalidOperationException(const InvalidOperationException& other700) : TException() {
-  message = other700.message;
-  __isset = other700.__isset;
+InvalidOperationException::InvalidOperationException(const InvalidOperationException& other702) : TException() {
+  message = other702.message;
+  __isset = other702.__isset;
 }
-InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other701) {
-  message = other701.message;
-  __isset = other701.__isset;
+InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other703) {
+  message = other703.message;
+  __isset = other703.__isset;
   return *this;
 }
 void InvalidOperationException::printTo(std::ostream& out) const {
@@ -16196,13 +16353,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) {
   swap(a.__isset, b.__isset);
 }
 
-ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other702) : TException() {
-  message = other702.message;
-  __isset = other702.__isset;
+ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other704) : TException() {
+  message = other704.message;
+  __isset = other704.__isset;
 }
-ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other703) {
-  message = other703.message;
-  __isset = other703.__isset;
+ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other705) {
+  message = other705.message;
+  __isset = other705.__isset;
   return *this;
 }
 void ConfigValSecurityException::printTo(std::ostream& out) const {
@@ -16293,13 +16450,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) {
   swap(a.__isset, b.__isset);
 }
 
-InvalidInputException::InvalidInputException(const InvalidInputException& other704) : TException() {
-  message = other704.message;
-  __isset = other704.__isset;
+InvalidInputException::InvalidInputException(const InvalidInputException& other706) : TException() {
+  message = other706.message;
+  __isset = other706.__isset;
 }
-InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other705) {
-  message = other705.message;
-  __isset = other705.__isset;
+InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other707) {
+  message = other707.message;
+  __isset = other707.__isset;
   return *this;
 }
 void InvalidInputException::printTo(std::ostream& out) const {
@@ -16390,13 +16547,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) {
   swap(a.__isset, b.__isset);
 }
 
-NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other706) : TException() {
-  message = other706.message;
-  __isset = other706.__isset;
+NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other708) : TException() {
+  message = other708.message;
+  __isset = other708.__isset;
 }
-NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other707) {
-  message = other707.message;
-  __isset = other707.__isset;
+NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other709) {
+  message = other709.message;
+  __isset = other709.__isset;
   return *this;
 }
 void NoSuchTxnException::printTo(std::ostream& out) const {
@@ -16487,13 +16644,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) {
   swap(a.__isset, b.__isset);
 }
 
-TxnAbortedException::TxnAbortedException(const TxnAbortedException& other708) : TException() {
-  message = other708.message;
-  __isset = other708.__isset;
+TxnAbortedException::TxnAbortedException(const TxnAbortedException& other710) : TException() {
+  message = other710.message;
+  __isset = other710.__isset;
 }
-TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other709) {
-  message = other709.message;
-  __isset = other709.__isset;
+TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other711) {
+  message = other711.message;
+  __isset = other711.__isset;
   return *this;
 }
 void TxnAbortedException::printTo(std::ostream& out) const {
@@ -16584,13 +16741,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) {
   swap(a.__isset, b.__isset);
 }
 
-TxnOpenException::TxnOpenException(const TxnOpenException& other710) : TException() {
-  message = other710.message;
-  __isset = other710.__isset;
+TxnOpenException::TxnOpenException(const TxnOpenException& other712) : TException() {
+  message = other712.message;
+  __isset = other712.__isset;
 }
-TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other711) {
-  message = other711.message;
-  __isset = other711.__isset;
+TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other713) {
+  message = other713.message;
+  __isset = other713.__isset;
   return *this;
 }
 void TxnOpenException::printTo(std::ostream& out) const {
@@ -16681,13 +16838,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) {
   swap(a.__isset, b.__isset);
 }
 
-NoSuchLockException::NoSuchLockException(const NoSuchLockException& other712) : TException() {
-  message = other712.message;
-  __isset = other712.__isset;
+NoSuchLockException::NoSuchLockException(const NoSuchLockException& other714) : TException() {
+  message = other714.message;
+  __isset = other714.__isset;
 }
-NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other713) {
-  message = other713.message;
-  __isset = other713.__isset;
+NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other715) {
+  message = other715.message;
+  __isset = other715.__isset;
   return *this;
 }
 void NoSuchLockException::printTo(std::ostream& out) const {

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 53ab272..05c288c 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -349,6 +349,8 @@ class ClearFileMetadataRequest;
 
 class GetAllFunctionsResponse;
 
+class TableMeta;
+
 class MetaException;
 
 class UnknownTableException;
@@ -6158,6 +6160,69 @@ inline std::ostream& operator<<(std::ostream& out, const GetAllFunctionsResponse
   return out;
 }
 
+typedef struct _TableMeta__isset {
+  _TableMeta__isset() : comments(false) {}
+  bool comments :1;
+} _TableMeta__isset;
+
+class TableMeta {
+ public:
+
+  TableMeta(const TableMeta&);
+  TableMeta& operator=(const TableMeta&);
+  TableMeta() : dbName(), tableName(), tableType(), comments() {
+  }
+
+  virtual ~TableMeta() throw();
+  std::string dbName;
+  std::string tableName;
+  std::string tableType;
+  std::string comments;
+
+  _TableMeta__isset __isset;
+
+  void __set_dbName(const std::string& val);
+
+  void __set_tableName(const std::string& val);
+
+  void __set_tableType(const std::string& val);
+
+  void __set_comments(const std::string& val);
+
+  bool operator == (const TableMeta & rhs) const
+  {
+    if (!(dbName == rhs.dbName))
+      return false;
+    if (!(tableName == rhs.tableName))
+      return false;
+    if (!(tableType == rhs.tableType))
+      return false;
+    if (__isset.comments != rhs.__isset.comments)
+      return false;
+    else if (__isset.comments && !(comments == rhs.comments))
+      return false;
+    return true;
+  }
+  bool operator != (const TableMeta &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const TableMeta & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(TableMeta &a, TableMeta &b);
+
+inline std::ostream& operator<<(std::ostream& out, const TableMeta& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
 typedef struct _MetaException__isset {
   _MetaException__isset() : message(false) {}
   bool message :1;

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java
new file mode 100644
index 0000000..08a8e36
--- /dev/null
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java
@@ -0,0 +1,701 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class TableMeta implements org.apache.thrift.TBase<TableMeta, TableMeta._Fields>, java.io.Serializable, Cloneable, Comparable<TableMeta> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableMeta");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField COMMENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("comments", org.apache.thrift.protocol.TType.STRING, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TableMetaStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TableMetaTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tableName; // required
+  private String tableType; // required
+  private String comments; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TABLE_NAME((short)2, "tableName"),
+    TABLE_TYPE((short)3, "tableType"),
+    COMMENTS((short)4, "comments");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TABLE_NAME
+          return TABLE_NAME;
+        case 3: // TABLE_TYPE
+          return TABLE_TYPE;
+        case 4: // COMMENTS
+          return COMMENTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.COMMENTS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tableType", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COMMENTS, new org.apache.thrift.meta_data.FieldMetaData("comments", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableMeta.class, metaDataMap);
+  }
+
+  public TableMeta() {
+  }
+
+  public TableMeta(
+    String dbName,
+    String tableName,
+    String tableType)
+  {
+    this();
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.tableType = tableType;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TableMeta(TableMeta other) {
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTableName()) {
+      this.tableName = other.tableName;
+    }
+    if (other.isSetTableType()) {
+      this.tableType = other.tableType;
+    }
+    if (other.isSetComments()) {
+      this.comments = other.comments;
+    }
+  }
+
+  public TableMeta deepCopy() {
+    return new TableMeta(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tableName = null;
+    this.tableType = null;
+    this.comments = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public String getTableType() {
+    return this.tableType;
+  }
+
+  public void setTableType(String tableType) {
+    this.tableType = tableType;
+  }
+
+  public void unsetTableType() {
+    this.tableType = null;
+  }
+
+  /** Returns true if field tableType is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableType() {
+    return this.tableType != null;
+  }
+
+  public void setTableTypeIsSet(boolean value) {
+    if (!value) {
+      this.tableType = null;
+    }
+  }
+
+  public String getComments() {
+    return this.comments;
+  }
+
+  public void setComments(String comments) {
+    this.comments = comments;
+  }
+
+  public void unsetComments() {
+    this.comments = null;
+  }
+
+  /** Returns true if field comments is set (has been assigned a value) and false otherwise */
+  public boolean isSetComments() {
+    return this.comments != null;
+  }
+
+  public void setCommentsIsSet(boolean value) {
+    if (!value) {
+      this.comments = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case TABLE_TYPE:
+      if (value == null) {
+        unsetTableType();
+      } else {
+        setTableType((String)value);
+      }
+      break;
+
+    case COMMENTS:
+      if (value == null) {
+        unsetComments();
+      } else {
+        setComments((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case TABLE_TYPE:
+      return getTableType();
+
+    case COMMENTS:
+      return getComments();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case TABLE_TYPE:
+      return isSetTableType();
+    case COMMENTS:
+      return isSetComments();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TableMeta)
+      return this.equals((TableMeta)that);
+    return false;
+  }
+
+  public boolean equals(TableMeta that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_tableType = true && this.isSetTableType();
+    boolean that_present_tableType = true && that.isSetTableType();
+    if (this_present_tableType || that_present_tableType) {
+      if (!(this_present_tableType && that_present_tableType))
+        return false;
+      if (!this.tableType.equals(that.tableType))
+        return false;
+    }
+
+    boolean this_present_comments = true && this.isSetComments();
+    boolean that_present_comments = true && that.isSetComments();
+    if (this_present_comments || that_present_comments) {
+      if (!(this_present_comments && that_present_comments))
+        return false;
+      if (!this.comments.equals(that.comments))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_tableType = true && (isSetTableType());
+    list.add(present_tableType);
+    if (present_tableType)
+      list.add(tableType);
+
+    boolean present_comments = true && (isSetComments());
+    list.add(present_comments);
+    if (present_comments)
+      list.add(comments);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(TableMeta other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableType()).compareTo(other.isSetTableType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableType, other.tableType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetComments()).compareTo(other.isSetComments());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetComments()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.comments, other.comments);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TableMeta(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableName:");
+    if (this.tableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableType:");
+    if (this.tableType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableType);
+    }
+    first = false;
+    if (isSetComments()) {
+      if (!first) sb.append(", ");
+      sb.append("comments:");
+      if (this.comments == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.comments);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTableName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTableType()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableType' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TableMetaStandardSchemeFactory implements SchemeFactory {
+    public TableMetaStandardScheme getScheme() {
+      return new TableMetaStandardScheme();
+    }
+  }
+
+  private static class TableMetaStandardScheme extends StandardScheme<TableMeta> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TableMeta struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TABLE_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableType = iprot.readString();
+              struct.setTableTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // COMMENTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.comments = iprot.readString();
+              struct.setCommentsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TableMeta struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableType != null) {
+        oprot.writeFieldBegin(TABLE_TYPE_FIELD_DESC);
+        oprot.writeString(struct.tableType);
+        oprot.writeFieldEnd();
+      }
+      if (struct.comments != null) {
+        if (struct.isSetComments()) {
+          oprot.writeFieldBegin(COMMENTS_FIELD_DESC);
+          oprot.writeString(struct.comments);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TableMetaTupleSchemeFactory implements SchemeFactory {
+    public TableMetaTupleScheme getScheme() {
+      return new TableMetaTupleScheme();
+    }
+  }
+
+  private static class TableMetaTupleScheme extends TupleScheme<TableMeta> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TableMeta struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tableName);
+      oprot.writeString(struct.tableType);
+      BitSet optionals = new BitSet();
+      if (struct.isSetComments()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetComments()) {
+        oprot.writeString(struct.comments);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TableMeta struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tableName = iprot.readString();
+      struct.setTableNameIsSet(true);
+      struct.tableType = iprot.readString();
+      struct.setTableTypeIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.comments = iprot.readString();
+        struct.setCommentsIsSet(true);
+      }
+    }
+  }
+
+}
+
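As a minimal sketch of the generated bean above (not part of the commit; only methods
shown in this diff are used, and the literal values are made up for illustration):

    // Sketch: exercising the generated TableMeta bean; values are illustrative.
    TableMeta meta = new TableMeta("default", "pcr_t1", "MANAGED_TABLE");
    meta.setComments("demo table");       // optional field; isSetComments() becomes true
    TableMeta copy = meta.deepCopy();     // uses the deep-copy constructor
    assert meta.equals(copy) && meta.compareTo(copy) == 0;
    System.out.println(copy);  // TableMeta(dbName:default, tableName:pcr_t1, tableType:MANAGED_TABLE, comments:demo table)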


[27/55] [abbrv] hive git commit: HIVE-11726: Pushed IN predicates to the metastore (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)

Posted by xu...@apache.org.
HIVE-11726: Pushed IN predicates to the metastore (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0d3a75df
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0d3a75df
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0d3a75df

Branch: refs/heads/spark
Commit: 0d3a75dfdf54312f297f39d4b2928ba34e2101ea
Parents: 1305ea9
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue Nov 3 18:32:14 2015 +0200
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Nov 6 17:57:12 2015 +0100

----------------------------------------------------------------------
 .../hadoop/hive/metastore/parser/Filter.g       | 218 ++++++++
 .../test/queries/clientpositive/pointlookup4.q  |  27 +
 .../results/clientpositive/pointlookup4.q.out   | 530 +++++++++++++++++++
 3 files changed, 775 insertions(+)
----------------------------------------------------------------------
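A hedged sketch of how a client could exercise the new IN filter syntax end to end.
HiveMetaStoreClient.listPartitionsByFilter is the existing public metastore API; the
database, table, and filter values below are made up for illustration:

    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class InFilterSketch {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        // Single-column form, parsed by the new inExpression rule below:
        //   (ds1) IN ('2000-04-08', '2000-04-09')
        List<Partition> parts = client.listPartitionsByFilter(
            "default", "pcr_t1",
            "(ds1) IN ('2000-04-08', '2000-04-09')", (short) -1);
        System.out.println(parts.size() + " partitions matched");
        client.close();
      }
    }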


http://git-wip-us.apache.org/repos/asf/hive/blob/0d3a75df/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
index 8aef5bf..81111a0 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
@@ -122,6 +122,10 @@ operatorExpression
     :
     betweenExpression
     |
+    inExpression
+    |
+    multiColInExpression
+    |
     binOpExpression
     ;
 
@@ -203,16 +207,229 @@ betweenExpression
         tree.addIntermediateNode(isPositive ? LogicalOperator.AND : LogicalOperator.OR);
     };
 
+inExpression
+@init {
+    List constants = new ArrayList();
+    Object constantV = null;
+    boolean isPositive = true;
+}
+    :
+    (
+        LPAREN key = Identifier RPAREN ( KW_NOT { isPositive = false; } )? IN LPAREN
+        (
+            (
+                constant = DateLiteral
+                {
+                    constantV = FilterLexer.ExtractDate(constant.getText());
+                    constants.add(constantV);
+                }
+                (
+                    COMMA constant = DateLiteral
+                    {
+                        constantV = FilterLexer.ExtractDate(constant.getText());
+                        constants.add(constantV);
+                    }
+                )*
+            )
+            |
+            (
+                constant = StringLiteral
+                {
+                    constantV = TrimQuotes(constant.getText());
+                    constants.add(constantV);
+                }
+                (
+                    COMMA constant = StringLiteral
+                    {
+                        constantV = TrimQuotes(constant.getText());
+                        constants.add(constantV);
+                    }
+                )*
+            )
+            |
+            (
+                constant = IntegralLiteral
+                {
+                    constantV = Long.parseLong(constant.getText());
+                    constants.add(constantV);
+                }
+                (
+                    COMMA constant = IntegralLiteral
+                    {
+                        constantV = Long.parseLong(constant.getText());
+                        constants.add(constantV);
+                    }
+                )*
+            )
+        ) RPAREN
+    )
+    {
+        for (int i = 0; i < constants.size(); i++) {
+            Object value = constants.get(i);
+            LeafNode leaf = new LeafNode();
+            leaf.keyName = key.getText();
+            leaf.value = value;
+            leaf.operator = isPositive ? Operator.EQUALS : Operator.NOTEQUALS2;
+            tree.addLeafNode(leaf);
+            if (i != 0) {
+                tree.addIntermediateNode(isPositive ? LogicalOperator.OR : LogicalOperator.AND);
+            }
+        }
+    };
+
+multiColInExpression
+@init {
+    List<String> keyNames = new ArrayList<String>();
+    List constants = new ArrayList();
+    List partialConstants;
+    String keyV = null;
+    Object constantV = null;
+    boolean isPositive = true;
+}
+    :
+    (
+        LPAREN
+        (
+            KW_STRUCT LPAREN key = Identifier
+            {
+                keyV = key.getText();
+                keyNames.add(keyV);
+            }
+            (
+                COMMA key = Identifier
+                {
+                    keyV = key.getText();
+                    keyNames.add(keyV);
+                }
+            )* RPAREN
+        ) RPAREN ( KW_NOT { isPositive = false; } )? IN LPAREN KW_CONST KW_STRUCT LPAREN
+        {
+            partialConstants = new ArrayList();
+        }
+        (
+            constant = DateLiteral
+            {
+                constantV = FilterLexer.ExtractDate(constant.getText());
+                partialConstants.add(constantV);
+            }
+            | constant = StringLiteral
+            {
+                constantV = TrimQuotes(constant.getText());
+                partialConstants.add(constantV);
+            }
+            | constant = IntegralLiteral
+            {
+                constantV = Long.parseLong(constant.getText());
+                partialConstants.add(constantV);
+            }
+        )
+        (
+            COMMA
+            (
+                constant = DateLiteral
+                {
+                    constantV = FilterLexer.ExtractDate(constant.getText());
+                    partialConstants.add(constantV);
+                }
+                | constant = StringLiteral
+                {
+                    constantV = TrimQuotes(constant.getText());
+                    partialConstants.add(constantV);
+                }
+                | constant = IntegralLiteral
+                {
+                    constantV = Long.parseLong(constant.getText());
+                    partialConstants.add(constantV);
+                }
+            )
+        )*
+        {
+            constants.add(partialConstants);
+        }
+        RPAREN
+        (
+            COMMA KW_CONST KW_STRUCT LPAREN
+            {
+                partialConstants = new ArrayList();
+            }
+            (
+                constant = DateLiteral
+                {
+                    constantV = FilterLexer.ExtractDate(constant.getText());
+                    partialConstants.add(constantV);
+                }
+                | constant = StringLiteral
+                {
+                    constantV = TrimQuotes(constant.getText());
+                    partialConstants.add(constantV);
+                }
+                | constant = IntegralLiteral
+                {
+                    constantV = Long.parseLong(constant.getText());
+                    partialConstants.add(constantV);
+                }
+            )
+            (
+                COMMA
+                (
+                    constant = DateLiteral
+                    {
+                        constantV = FilterLexer.ExtractDate(constant.getText());
+                        partialConstants.add(constantV);
+                    }
+                    | constant = StringLiteral
+                    {
+                        constantV = TrimQuotes(constant.getText());
+                        partialConstants.add(constantV);
+                    }
+                    | constant = IntegralLiteral
+                    {
+                        constantV = Long.parseLong(constant.getText());
+                        partialConstants.add(constantV);
+                    }
+                )
+            )*
+            {
+                constants.add(partialConstants);
+            }
+            RPAREN
+        )* RPAREN
+    )
+    {
+        for (int i = 0; i < constants.size(); i++) {
+            List list = (List) constants.get(i);
+            assert keyNames.size() == list.size();
+            for (int j=0; j < list.size(); j++) {
+                String keyName = keyNames.get(j);
+                Object value = list.get(j);
+                LeafNode leaf = new LeafNode();
+                leaf.keyName = keyName;
+                leaf.value = value;
+                leaf.operator = isPositive ? Operator.EQUALS : Operator.NOTEQUALS2;
+                tree.addLeafNode(leaf);
+                if (j != 0) {
+                    tree.addIntermediateNode(isPositive ? LogicalOperator.AND : LogicalOperator.OR);
+                }
+            }
+            if (i != 0) {
+                tree.addIntermediateNode(isPositive ? LogicalOperator.OR : LogicalOperator.AND);
+            }
+        }
+    };
+
 // Keywords
 KW_NOT : 'NOT';
 KW_AND : 'AND';
 KW_OR : 'OR';
 KW_LIKE : 'LIKE';
 KW_DATE : 'date';
+KW_CONST : 'CONST';
+KW_STRUCT : 'STRUCT';
 
 // Operators
 LPAREN : '(' ;
 RPAREN : ')' ;
+COMMA : ',' ;
 EQUAL : '=';
 NOTEQUAL : '<>' | '!=';
 LESSTHANOREQUALTO : '<=';
@@ -220,6 +437,7 @@ LESSTHAN : '<';
 GREATERTHANOREQUALTO : '>=';
 GREATERTHAN : '>';
 BETWEEN : 'BETWEEN';
+IN : 'IN';
 
 // LITERALS
 fragment
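The action blocks above rewrite an IN list into EQUALS leaf nodes joined by OR (for
NOT IN, NOTEQUALS2 leaves joined by AND) on the metastore's expression tree. A
standalone sketch of that expansion, producing the equivalent predicate as a plain
string rather than the parser's actual LeafNode/ExpressionTree structures:

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class InExpansionSketch {
      // (key) IN (v1, v2)     ->  (key = 'v1') OR (key = 'v2')
      // (key) NOT IN (v1, v2) ->  (key != 'v1') AND (key != 'v2')
      static String expand(String key, List<String> values, boolean positive) {
        String op = positive ? " = " : " != ";
        String join = positive ? " OR " : " AND ";
        return values.stream()
            .map(v -> "(" + key + op + "'" + v + "')")
            .collect(Collectors.joining(join));
      }

      public static void main(String[] args) {
        System.out.println(expand("ds1", Arrays.asList("2000-04-08", "2000-04-09"), true));
        // -> (ds1 = '2000-04-08') OR (ds1 = '2000-04-09')
      }
    }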

http://git-wip-us.apache.org/repos/asf/hive/blob/0d3a75df/ql/src/test/queries/clientpositive/pointlookup4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/pointlookup4.q b/ql/src/test/queries/clientpositive/pointlookup4.q
new file mode 100644
index 0000000..e0bf5a6
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/pointlookup4.q
@@ -0,0 +1,27 @@
+drop table pcr_t1;
+
+create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string);
+insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;
+
+set hive.optimize.point.lookup=false;
+set hive.optimize.partition.columns.separate=false;
+
+explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2;
+
+set hive.optimize.point.lookup=true;
+set hive.optimize.point.lookup.min=0;
+set hive.optimize.partition.columns.separate=true;
+
+explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2;
+
+drop table pcr_t1;
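The test flips hive.optimize.point.lookup and hive.optimize.partition.columns.separate
to compare plans with and without the OR-to-IN rewrite; the .q.out below shows the
filter collapsing into a struct IN predicate under the second setting. The same
toggles can also be set programmatically (a sketch; HiveConf extends Hadoop's
Configuration, so the string keys from the test are used directly):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class PointLookupConfSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolean("hive.optimize.point.lookup", true);
        conf.setInt("hive.optimize.point.lookup.min", 0);  // rewrite even short OR chains
        conf.setBoolean("hive.optimize.partition.columns.separate", true);
        System.out.println(conf.getBoolean("hive.optimize.point.lookup", false));
      }
    }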

http://git-wip-us.apache.org/repos/asf/hive/blob/0d3a75df/ql/src/test/results/clientpositive/pointlookup4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup4.q.out b/ql/src/test/results/clientpositive/pointlookup4.q.out
new file mode 100644
index 0000000..157aea6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/pointlookup4.q.out
@@ -0,0 +1,530 @@
+PREHOOK: query: drop table pcr_t1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table pcr_t1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@pcr_t1
+POSTHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@pcr_t1
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcr_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               key
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               value
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds1
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds2
+      TOK_WHERE
+         or
+            and
+               and
+                  =
+                     TOK_TABLE_OR_COL
+                        ds1
+                     '2000-04-08'
+                  =
+                     TOK_TABLE_OR_COL
+                        ds2
+                     '2001-04-08'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  1
+            and
+               and
+                  =
+                     TOK_TABLE_OR_COL
+                        ds1
+                     '2000-04-09'
+                  =
+                     TOK_TABLE_OR_COL
+                        ds2
+                     '2001-04-09'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  2
+      TOK_ORDERBY
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               key
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               value
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds1
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds2
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcr_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (((ds1 = '2000-04-08') and (ds2 = '2001-04-08') and (key = 1)) or ((ds1 = '2000-04-09') and (ds2 = '2001-04-09') and (key = 2))) (type: boolean)
+              Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  sort order: ++++
+                  Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                  tag: -1
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds2=2001-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds1 2000-04-08
+              ds2 2001-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcr_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds1/ds2
+              partition_columns.types string:string
+              rawDataSize 160
+              serialization.ddl struct pcr_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcr_t1
+                partition_columns ds1/ds2
+                partition_columns.types string:string
+                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcr_t1
+            name: default.pcr_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds2=2001-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds1 2000-04-09
+              ds2 2001-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcr_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds1/ds2
+              partition_columns.types string:string
+              rawDataSize 160
+              serialization.ddl struct pcr_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcr_t1
+                partition_columns ds1/ds2
+                partition_columns.types string:string
+                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcr_t1
+            name: default.pcr_t1
+      Truncated Path -> Alias:
+        /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1]
+        /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  columns _col0,_col1,_col2,_col3
+                  columns.types int:string:string:string
+                  escape.delim \
+                  hive.serialization.extend.additional.nesting.levels true
+                  serialization.escape.crlf true
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            TotalFiles: 1
+            GatherStats: false
+            MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcr_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               key
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               value
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds1
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds2
+      TOK_WHERE
+         or
+            and
+               and
+                  =
+                     TOK_TABLE_OR_COL
+                        ds1
+                     '2000-04-08'
+                  =
+                     TOK_TABLE_OR_COL
+                        ds2
+                     '2001-04-08'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  1
+            and
+               and
+                  =
+                     TOK_TABLE_OR_COL
+                        ds1
+                     '2000-04-09'
+                  =
+                     TOK_TABLE_OR_COL
+                        ds2
+                     '2001-04-09'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  2
+      TOK_ORDERBY
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               key
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               value
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds1
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds2
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcr_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (struct(ds1,key,ds2)) IN (const struct('2000-04-08',1,'2001-04-08'), const struct('2000-04-09',2,'2001-04-09')) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  sort order: ++++
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                  tag: -1
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds2=2001-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds1 2000-04-08
+              ds2 2001-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcr_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds1/ds2
+              partition_columns.types string:string
+              rawDataSize 160
+              serialization.ddl struct pcr_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcr_t1
+                partition_columns ds1/ds2
+                partition_columns.types string:string
+                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcr_t1
+            name: default.pcr_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds2=2001-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds1 2000-04-09
+              ds2 2001-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcr_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds1/ds2
+              partition_columns.types string:string
+              rawDataSize 160
+              serialization.ddl struct pcr_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcr_t1
+                partition_columns ds1/ds2
+                partition_columns.types string:string
+                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcr_t1
+            name: default.pcr_t1
+      Truncated Path -> Alias:
+        /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1]
+        /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 1
+            Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  columns _col0,_col1,_col2,_col3
+                  columns.types int:string:string:string
+                  escape.delim \
+                  hive.serialization.extend.additional.nesting.levels true
+                  serialization.escape.crlf true
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            TotalFiles: 1
+            GatherStats: false
+            MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: drop table pcr_t1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@pcr_t1
+PREHOOK: Output: default@pcr_t1
+POSTHOOK: query: drop table pcr_t1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@pcr_t1
+POSTHOOK: Output: default@pcr_t1
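
[Editorial note: for reference, a minimal Python sketch of the struct-IN semantics shown in the Filter Operator above. The row data is invented for illustration; Hive evaluates this predicate natively, this only models it.]

    rows = [
        {'key': 1, 'value': 'val_1', 'ds1': '2000-04-08', 'ds2': '2001-04-08'},
        {'key': 2, 'value': 'val_2', 'ds1': '2000-04-09', 'ds2': '2001-04-09'},
        {'key': 3, 'value': 'val_3', 'ds1': '2000-04-10', 'ds2': '2001-04-10'},
    ]
    # The two 'const struct' literals from the predicate, as tuples.
    targets = set([('2000-04-08', 1, '2001-04-08'),
                   ('2000-04-09', 2, '2001-04-09')])
    kept = [r for r in rows if (r['ds1'], r['key'], r['ds2']) in targets]
    # Only the first two rows survive, consistent with the plan's halved
    # row estimate (Num rows: 40 -> 20).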


[41/55] [abbrv] hive git commit: HIVE-7575 GetTables thrift call is very slow (Navis via Aihua Xu, reviewed by Szehon Ho, Aihua Xu)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 9873810..5c72a27 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -183,6 +183,15 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass
 
+  def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
+    """
+    Parameters:
+     - db_patterns
+     - tbl_patterns
+     - tbl_types
+    """
+    pass
+
   def get_all_tables(self, db_name):
     """
     Parameters:
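
[Editorial note: the new Iface method above is only a stub. A hedged sketch of a handler implementing it follows; the fnmatch-based matching is illustrative only (the metastore's own pattern rules differ), and the TableMeta field names dbName/tableName/tableType are an assumption based on the TableMeta struct this change introduces.]

    import fnmatch
    from hive_metastore.ttypes import TableMeta

    class TableMetaHandlerSketch(object):
        # Invented inventory; a real handler would query the metastore backend.
        TABLES = [('default', 'src', 'MANAGED_TABLE'),
                  ('default', 'src_ext', 'EXTERNAL_TABLE')]

        def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
            out = []
            for db, tbl, ttype in self.TABLES:
                if (fnmatch.fnmatch(db, db_patterns) and
                        fnmatch.fnmatch(tbl, tbl_patterns) and
                        (not tbl_types or ttype in tbl_types)):
                    out.append(TableMeta(dbName=db, tableName=tbl,
                                         tableType=ttype))
            return out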
@@ -1830,6 +1839,43 @@ class Client(fb303.FacebookService.Client, Iface):
       raise result.o1
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables failed: unknown result")
 
+  def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
+    """
+    Parameters:
+     - db_patterns
+     - tbl_patterns
+     - tbl_types
+    """
+    self.send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
+    return self.recv_get_table_meta()
+
+  def send_get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
+    self._oprot.writeMessageBegin('get_table_meta', TMessageType.CALL, self._seqid)
+    args = get_table_meta_args()
+    args.db_patterns = db_patterns
+    args.tbl_patterns = tbl_patterns
+    args.tbl_types = tbl_types
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_table_meta(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_table_meta_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_meta failed: unknown result")
+
   def get_all_tables(self, db_name):
     """
     Parameters:
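
[Editorial note: a minimal client-side usage sketch for the send/recv pair above; the host, port, and transport setup are assumptions, not part of this diff. One get_table_meta round trip returns metadata for every matching table, instead of the per-database get_tables calls whose slowness HIVE-7575 targets.]

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore

    # 9083 is the conventional metastore port; adjust for your deployment.
    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    for meta in client.get_table_meta('*', '*',
                                      ['MANAGED_TABLE', 'EXTERNAL_TABLE']):
        print('%s.%s (%s)' % (meta.dbName, meta.tableName, meta.tableType))
    transport.close()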
@@ -5852,6 +5898,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     self._processMap["drop_table"] = Processor.process_drop_table
     self._processMap["drop_table_with_environment_context"] = Processor.process_drop_table_with_environment_context
     self._processMap["get_tables"] = Processor.process_get_tables
+    self._processMap["get_table_meta"] = Processor.process_get_table_meta
     self._processMap["get_all_tables"] = Processor.process_get_all_tables
     self._processMap["get_table"] = Processor.process_get_table
     self._processMap["get_table_objects_by_name"] = Processor.process_get_table_objects_by_name
@@ -6517,6 +6564,28 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_get_table_meta(self, seqid, iprot, oprot):
+    args = get_table_meta_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_table_meta_result()
+    try:
+      result.success = self._handler.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except MetaException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_table_meta", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
   def process_get_all_tables(self, seqid, iprot, oprot):
     args = get_all_tables_args()
     args.read(iprot)
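
[Editorial note: a hedged wiring sketch showing how the dispatch above becomes reachable. TableMetaHandlerSketch is the illustrative handler sketched earlier, not part of this patch; a real handler must also implement the rest of Iface, including the inherited fb303 methods.]

    from thrift.protocol import TBinaryProtocol
    from thrift.server import TServer
    from thrift.transport import TSocket, TTransport
    from hive_metastore import ThriftHiveMetastore

    processor = ThriftHiveMetastore.Processor(TableMetaHandlerSketch())
    server = TServer.TSimpleServer(
        processor,
        TSocket.TServerSocket(port=9090),
        TTransport.TBufferedTransportFactory(),
        TBinaryProtocol.TBinaryProtocolFactory())
    # Each incoming 'get_table_meta' CALL is looked up in self._processMap and
    # routed to process_get_table_meta, which replies REPLY or EXCEPTION as above.
    server.serve()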
@@ -12752,6 +12821,192 @@ class get_tables_result:
   def __ne__(self, other):
     return not (self == other)
 
+class get_table_meta_args:
+  """
+  Attributes:
+   - db_patterns
+   - tbl_patterns
+   - tbl_types
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'db_patterns', None, None, ), # 1
+    (2, TType.STRING, 'tbl_patterns', None, None, ), # 2
+    (3, TType.LIST, 'tbl_types', (TType.STRING,None), None, ), # 3
+  )
+
+  def __init__(self, db_patterns=None, tbl_patterns=None, tbl_types=None,):
+    self.db_patterns = db_patterns
+    self.tbl_patterns = tbl_patterns
+    self.tbl_types = tbl_types
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.db_patterns = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tbl_patterns = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.LIST:
+          self.tbl_types = []
+          (_etype583, _size580) = iprot.readListBegin()
+          for _i584 in xrange(_size580):
+            _elem585 = iprot.readString()
+            self.tbl_types.append(_elem585)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_table_meta_args')
+    if self.db_patterns is not None:
+      oprot.writeFieldBegin('db_patterns', TType.STRING, 1)
+      oprot.writeString(self.db_patterns)
+      oprot.writeFieldEnd()
+    if self.tbl_patterns is not None:
+      oprot.writeFieldBegin('tbl_patterns', TType.STRING, 2)
+      oprot.writeString(self.tbl_patterns)
+      oprot.writeFieldEnd()
+    if self.tbl_types is not None:
+      oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRING, len(self.tbl_types))
+      for iter586 in self.tbl_types:
+        oprot.writeString(iter586)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.db_patterns)
+    value = (value * 31) ^ hash(self.tbl_patterns)
+    value = (value * 31) ^ hash(self.tbl_types)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_table_meta_result:
+  """
+  Attributes:
+   - success
+   - o1
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRUCT,(TableMeta, TableMeta.thrift_spec)), None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+  )
+
+  def __init__(self, success=None, o1=None,):
+    self.success = success
+    self.o1 = o1
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype590, _size587) = iprot.readListBegin()
+          for _i591 in xrange(_size587):
+            _elem592 = TableMeta()
+            _elem592.read(iprot)
+            self.success.append(_elem592)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_table_meta_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRUCT, len(self.success))
+      for iter593 in self.success:
+        iter593.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.o1 is not None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    value = (value * 31) ^ hash(self.o1)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class get_all_tables_args:
   """
   Attributes:
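
[Editorial note: the read/write pairs above are plain Thrift struct (de)serialization. A hedged round-trip sketch through an in-memory transport, with invented field values:]

    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore.ThriftHiveMetastore import get_table_meta_args

    args = get_table_meta_args(db_patterns='default', tbl_patterns='*',
                               tbl_types=['MANAGED_TABLE'])
    buf = TTransport.TMemoryBuffer()
    args.write(TBinaryProtocol.TBinaryProtocol(buf))

    decoded = get_table_meta_args()
    decoded.read(TBinaryProtocol.TBinaryProtocol(
        TTransport.TMemoryBuffer(buf.getvalue())))
    assert decoded == args  # __eq__ above compares the structs' __dict__s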
@@ -12845,10 +13100,10 @@ class get_all_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype583, _size580) = iprot.readListBegin()
-          for _i584 in xrange(_size580):
-            _elem585 = iprot.readString()
-            self.success.append(_elem585)
+          (_etype597, _size594) = iprot.readListBegin()
+          for _i598 in xrange(_size594):
+            _elem599 = iprot.readString()
+            self.success.append(_elem599)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12871,8 +13126,8 @@ class get_all_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter586 in self.success:
-        oprot.writeString(iter586)
+      for iter600 in self.success:
+        oprot.writeString(iter600)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13108,10 +13363,10 @@ class get_table_objects_by_name_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.tbl_names = []
-          (_etype590, _size587) = iprot.readListBegin()
-          for _i591 in xrange(_size587):
-            _elem592 = iprot.readString()
-            self.tbl_names.append(_elem592)
+          (_etype604, _size601) = iprot.readListBegin()
+          for _i605 in xrange(_size601):
+            _elem606 = iprot.readString()
+            self.tbl_names.append(_elem606)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13132,8 +13387,8 @@ class get_table_objects_by_name_args:
     if self.tbl_names is not None:
       oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-      for iter593 in self.tbl_names:
-        oprot.writeString(iter593)
+      for iter607 in self.tbl_names:
+        oprot.writeString(iter607)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -13194,11 +13449,11 @@ class get_table_objects_by_name_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype597, _size594) = iprot.readListBegin()
-          for _i598 in xrange(_size594):
-            _elem599 = Table()
-            _elem599.read(iprot)
-            self.success.append(_elem599)
+          (_etype611, _size608) = iprot.readListBegin()
+          for _i612 in xrange(_size608):
+            _elem613 = Table()
+            _elem613.read(iprot)
+            self.success.append(_elem613)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13233,8 +13488,8 @@ class get_table_objects_by_name_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter600 in self.success:
-        iter600.write(oprot)
+      for iter614 in self.success:
+        iter614.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13400,10 +13655,10 @@ class get_table_names_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype604, _size601) = iprot.readListBegin()
-          for _i605 in xrange(_size601):
-            _elem606 = iprot.readString()
-            self.success.append(_elem606)
+          (_etype618, _size615) = iprot.readListBegin()
+          for _i619 in xrange(_size615):
+            _elem620 = iprot.readString()
+            self.success.append(_elem620)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13438,8 +13693,8 @@ class get_table_names_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter607 in self.success:
-        oprot.writeString(iter607)
+      for iter621 in self.success:
+        oprot.writeString(iter621)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -14409,11 +14664,11 @@ class add_partitions_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype611, _size608) = iprot.readListBegin()
-          for _i612 in xrange(_size608):
-            _elem613 = Partition()
-            _elem613.read(iprot)
-            self.new_parts.append(_elem613)
+          (_etype625, _size622) = iprot.readListBegin()
+          for _i626 in xrange(_size622):
+            _elem627 = Partition()
+            _elem627.read(iprot)
+            self.new_parts.append(_elem627)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -14430,8 +14685,8 @@ class add_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter614 in self.new_parts:
-        iter614.write(oprot)
+      for iter628 in self.new_parts:
+        iter628.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -14589,11 +14844,11 @@ class add_partitions_pspec_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype618, _size615) = iprot.readListBegin()
-          for _i619 in xrange(_size615):
-            _elem620 = PartitionSpec()
-            _elem620.read(iprot)
-            self.new_parts.append(_elem620)
+          (_etype632, _size629) = iprot.readListBegin()
+          for _i633 in xrange(_size629):
+            _elem634 = PartitionSpec()
+            _elem634.read(iprot)
+            self.new_parts.append(_elem634)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -14610,8 +14865,8 @@ class add_partitions_pspec_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter621 in self.new_parts:
-        iter621.write(oprot)
+      for iter635 in self.new_parts:
+        iter635.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -14785,10 +15040,10 @@ class append_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype625, _size622) = iprot.readListBegin()
-          for _i626 in xrange(_size622):
-            _elem627 = iprot.readString()
-            self.part_vals.append(_elem627)
+          (_etype639, _size636) = iprot.readListBegin()
+          for _i640 in xrange(_size636):
+            _elem641 = iprot.readString()
+            self.part_vals.append(_elem641)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -14813,8 +15068,8 @@ class append_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter628 in self.part_vals:
-        oprot.writeString(iter628)
+      for iter642 in self.part_vals:
+        oprot.writeString(iter642)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -15167,10 +15422,10 @@ class append_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype632, _size629) = iprot.readListBegin()
-          for _i633 in xrange(_size629):
-            _elem634 = iprot.readString()
-            self.part_vals.append(_elem634)
+          (_etype646, _size643) = iprot.readListBegin()
+          for _i647 in xrange(_size643):
+            _elem648 = iprot.readString()
+            self.part_vals.append(_elem648)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15201,8 +15456,8 @@ class append_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter635 in self.part_vals:
-        oprot.writeString(iter635)
+      for iter649 in self.part_vals:
+        oprot.writeString(iter649)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -15797,10 +16052,10 @@ class drop_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype639, _size636) = iprot.readListBegin()
-          for _i640 in xrange(_size636):
-            _elem641 = iprot.readString()
-            self.part_vals.append(_elem641)
+          (_etype653, _size650) = iprot.readListBegin()
+          for _i654 in xrange(_size650):
+            _elem655 = iprot.readString()
+            self.part_vals.append(_elem655)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15830,8 +16085,8 @@ class drop_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter642 in self.part_vals:
-        oprot.writeString(iter642)
+      for iter656 in self.part_vals:
+        oprot.writeString(iter656)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -16004,10 +16259,10 @@ class drop_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype646, _size643) = iprot.readListBegin()
-          for _i647 in xrange(_size643):
-            _elem648 = iprot.readString()
-            self.part_vals.append(_elem648)
+          (_etype660, _size657) = iprot.readListBegin()
+          for _i661 in xrange(_size657):
+            _elem662 = iprot.readString()
+            self.part_vals.append(_elem662)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16043,8 +16298,8 @@ class drop_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter649 in self.part_vals:
-        oprot.writeString(iter649)
+      for iter663 in self.part_vals:
+        oprot.writeString(iter663)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -16781,10 +17036,10 @@ class get_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype653, _size650) = iprot.readListBegin()
-          for _i654 in xrange(_size650):
-            _elem655 = iprot.readString()
-            self.part_vals.append(_elem655)
+          (_etype667, _size664) = iprot.readListBegin()
+          for _i668 in xrange(_size664):
+            _elem669 = iprot.readString()
+            self.part_vals.append(_elem669)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16809,8 +17064,8 @@ class get_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter656 in self.part_vals:
-        oprot.writeString(iter656)
+      for iter670 in self.part_vals:
+        oprot.writeString(iter670)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -16969,11 +17224,11 @@ class exchange_partition_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype658, _vtype659, _size657 ) = iprot.readMapBegin()
-          for _i661 in xrange(_size657):
-            _key662 = iprot.readString()
-            _val663 = iprot.readString()
-            self.partitionSpecs[_key662] = _val663
+          (_ktype672, _vtype673, _size671 ) = iprot.readMapBegin()
+          for _i675 in xrange(_size671):
+            _key676 = iprot.readString()
+            _val677 = iprot.readString()
+            self.partitionSpecs[_key676] = _val677
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -17010,9 +17265,9 @@ class exchange_partition_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter664,viter665 in self.partitionSpecs.items():
-        oprot.writeString(kiter664)
-        oprot.writeString(viter665)
+      for kiter678,viter679 in self.partitionSpecs.items():
+        oprot.writeString(kiter678)
+        oprot.writeString(viter679)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -17217,11 +17472,11 @@ class exchange_partitions_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype667, _vtype668, _size666 ) = iprot.readMapBegin()
-          for _i670 in xrange(_size666):
-            _key671 = iprot.readString()
-            _val672 = iprot.readString()
-            self.partitionSpecs[_key671] = _val672
+          (_ktype681, _vtype682, _size680 ) = iprot.readMapBegin()
+          for _i684 in xrange(_size680):
+            _key685 = iprot.readString()
+            _val686 = iprot.readString()
+            self.partitionSpecs[_key685] = _val686
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -17258,9 +17513,9 @@ class exchange_partitions_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter673,viter674 in self.partitionSpecs.items():
-        oprot.writeString(kiter673)
-        oprot.writeString(viter674)
+      for kiter687,viter688 in self.partitionSpecs.items():
+        oprot.writeString(kiter687)
+        oprot.writeString(viter688)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -17343,11 +17598,11 @@ class exchange_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype678, _size675) = iprot.readListBegin()
-          for _i679 in xrange(_size675):
-            _elem680 = Partition()
-            _elem680.read(iprot)
-            self.success.append(_elem680)
+          (_etype692, _size689) = iprot.readListBegin()
+          for _i693 in xrange(_size689):
+            _elem694 = Partition()
+            _elem694.read(iprot)
+            self.success.append(_elem694)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17388,8 +17643,8 @@ class exchange_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter681 in self.success:
-        iter681.write(oprot)
+      for iter695 in self.success:
+        iter695.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17483,10 +17738,10 @@ class get_partition_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype685, _size682) = iprot.readListBegin()
-          for _i686 in xrange(_size682):
-            _elem687 = iprot.readString()
-            self.part_vals.append(_elem687)
+          (_etype699, _size696) = iprot.readListBegin()
+          for _i700 in xrange(_size696):
+            _elem701 = iprot.readString()
+            self.part_vals.append(_elem701)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17498,10 +17753,10 @@ class get_partition_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype691, _size688) = iprot.readListBegin()
-          for _i692 in xrange(_size688):
-            _elem693 = iprot.readString()
-            self.group_names.append(_elem693)
+          (_etype705, _size702) = iprot.readListBegin()
+          for _i706 in xrange(_size702):
+            _elem707 = iprot.readString()
+            self.group_names.append(_elem707)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17526,8 +17781,8 @@ class get_partition_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter694 in self.part_vals:
-        oprot.writeString(iter694)
+      for iter708 in self.part_vals:
+        oprot.writeString(iter708)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.user_name is not None:
@@ -17537,8 +17792,8 @@ class get_partition_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter695 in self.group_names:
-        oprot.writeString(iter695)
+      for iter709 in self.group_names:
+        oprot.writeString(iter709)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -17967,11 +18222,11 @@ class get_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype699, _size696) = iprot.readListBegin()
-          for _i700 in xrange(_size696):
-            _elem701 = Partition()
-            _elem701.read(iprot)
-            self.success.append(_elem701)
+          (_etype713, _size710) = iprot.readListBegin()
+          for _i714 in xrange(_size710):
+            _elem715 = Partition()
+            _elem715.read(iprot)
+            self.success.append(_elem715)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18000,8 +18255,8 @@ class get_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter702 in self.success:
-        iter702.write(oprot)
+      for iter716 in self.success:
+        iter716.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18095,10 +18350,10 @@ class get_partitions_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype706, _size703) = iprot.readListBegin()
-          for _i707 in xrange(_size703):
-            _elem708 = iprot.readString()
-            self.group_names.append(_elem708)
+          (_etype720, _size717) = iprot.readListBegin()
+          for _i721 in xrange(_size717):
+            _elem722 = iprot.readString()
+            self.group_names.append(_elem722)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18131,8 +18386,8 @@ class get_partitions_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter709 in self.group_names:
-        oprot.writeString(iter709)
+      for iter723 in self.group_names:
+        oprot.writeString(iter723)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -18193,11 +18448,11 @@ class get_partitions_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype713, _size710) = iprot.readListBegin()
-          for _i714 in xrange(_size710):
-            _elem715 = Partition()
-            _elem715.read(iprot)
-            self.success.append(_elem715)
+          (_etype727, _size724) = iprot.readListBegin()
+          for _i728 in xrange(_size724):
+            _elem729 = Partition()
+            _elem729.read(iprot)
+            self.success.append(_elem729)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18226,8 +18481,8 @@ class get_partitions_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter716 in self.success:
-        iter716.write(oprot)
+      for iter730 in self.success:
+        iter730.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18385,11 +18640,11 @@ class get_partitions_pspec_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype720, _size717) = iprot.readListBegin()
-          for _i721 in xrange(_size717):
-            _elem722 = PartitionSpec()
-            _elem722.read(iprot)
-            self.success.append(_elem722)
+          (_etype734, _size731) = iprot.readListBegin()
+          for _i735 in xrange(_size731):
+            _elem736 = PartitionSpec()
+            _elem736.read(iprot)
+            self.success.append(_elem736)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18418,8 +18673,8 @@ class get_partitions_pspec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter723 in self.success:
-        iter723.write(oprot)
+      for iter737 in self.success:
+        iter737.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18574,10 +18829,10 @@ class get_partition_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype727, _size724) = iprot.readListBegin()
-          for _i728 in xrange(_size724):
-            _elem729 = iprot.readString()
-            self.success.append(_elem729)
+          (_etype741, _size738) = iprot.readListBegin()
+          for _i742 in xrange(_size738):
+            _elem743 = iprot.readString()
+            self.success.append(_elem743)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18600,8 +18855,8 @@ class get_partition_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter730 in self.success:
-        oprot.writeString(iter730)
+      for iter744 in self.success:
+        oprot.writeString(iter744)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -18677,10 +18932,10 @@ class get_partitions_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype734, _size731) = iprot.readListBegin()
-          for _i735 in xrange(_size731):
-            _elem736 = iprot.readString()
-            self.part_vals.append(_elem736)
+          (_etype748, _size745) = iprot.readListBegin()
+          for _i749 in xrange(_size745):
+            _elem750 = iprot.readString()
+            self.part_vals.append(_elem750)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18710,8 +18965,8 @@ class get_partitions_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter737 in self.part_vals:
-        oprot.writeString(iter737)
+      for iter751 in self.part_vals:
+        oprot.writeString(iter751)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -18775,11 +19030,11 @@ class get_partitions_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype741, _size738) = iprot.readListBegin()
-          for _i742 in xrange(_size738):
-            _elem743 = Partition()
-            _elem743.read(iprot)
-            self.success.append(_elem743)
+          (_etype755, _size752) = iprot.readListBegin()
+          for _i756 in xrange(_size752):
+            _elem757 = Partition()
+            _elem757.read(iprot)
+            self.success.append(_elem757)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18808,8 +19063,8 @@ class get_partitions_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter744 in self.success:
-        iter744.write(oprot)
+      for iter758 in self.success:
+        iter758.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18896,10 +19151,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype748, _size745) = iprot.readListBegin()
-          for _i749 in xrange(_size745):
-            _elem750 = iprot.readString()
-            self.part_vals.append(_elem750)
+          (_etype762, _size759) = iprot.readListBegin()
+          for _i763 in xrange(_size759):
+            _elem764 = iprot.readString()
+            self.part_vals.append(_elem764)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18916,10 +19171,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 6:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype754, _size751) = iprot.readListBegin()
-          for _i755 in xrange(_size751):
-            _elem756 = iprot.readString()
-            self.group_names.append(_elem756)
+          (_etype768, _size765) = iprot.readListBegin()
+          for _i769 in xrange(_size765):
+            _elem770 = iprot.readString()
+            self.group_names.append(_elem770)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18944,8 +19199,8 @@ class get_partitions_ps_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter757 in self.part_vals:
-        oprot.writeString(iter757)
+      for iter771 in self.part_vals:
+        oprot.writeString(iter771)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -18959,8 +19214,8 @@ class get_partitions_ps_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 6)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter758 in self.group_names:
-        oprot.writeString(iter758)
+      for iter772 in self.group_names:
+        oprot.writeString(iter772)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -19022,11 +19277,11 @@ class get_partitions_ps_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype762, _size759) = iprot.readListBegin()
-          for _i763 in xrange(_size759):
-            _elem764 = Partition()
-            _elem764.read(iprot)
-            self.success.append(_elem764)
+          (_etype776, _size773) = iprot.readListBegin()
+          for _i777 in xrange(_size773):
+            _elem778 = Partition()
+            _elem778.read(iprot)
+            self.success.append(_elem778)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19055,8 +19310,8 @@ class get_partitions_ps_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter765 in self.success:
-        iter765.write(oprot)
+      for iter779 in self.success:
+        iter779.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19137,10 +19392,10 @@ class get_partition_names_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype769, _size766) = iprot.readListBegin()
-          for _i770 in xrange(_size766):
-            _elem771 = iprot.readString()
-            self.part_vals.append(_elem771)
+          (_etype783, _size780) = iprot.readListBegin()
+          for _i784 in xrange(_size780):
+            _elem785 = iprot.readString()
+            self.part_vals.append(_elem785)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19170,8 +19425,8 @@ class get_partition_names_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter772 in self.part_vals:
-        oprot.writeString(iter772)
+      for iter786 in self.part_vals:
+        oprot.writeString(iter786)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -19235,10 +19490,10 @@ class get_partition_names_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype776, _size773) = iprot.readListBegin()
-          for _i777 in xrange(_size773):
-            _elem778 = iprot.readString()
-            self.success.append(_elem778)
+          (_etype790, _size787) = iprot.readListBegin()
+          for _i791 in xrange(_size787):
+            _elem792 = iprot.readString()
+            self.success.append(_elem792)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19267,8 +19522,8 @@ class get_partition_names_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter779 in self.success:
-        oprot.writeString(iter779)
+      for iter793 in self.success:
+        oprot.writeString(iter793)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19439,11 +19694,11 @@ class get_partitions_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype783, _size780) = iprot.readListBegin()
-          for _i784 in xrange(_size780):
-            _elem785 = Partition()
-            _elem785.read(iprot)
-            self.success.append(_elem785)
+          (_etype797, _size794) = iprot.readListBegin()
+          for _i798 in xrange(_size794):
+            _elem799 = Partition()
+            _elem799.read(iprot)
+            self.success.append(_elem799)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19472,8 +19727,8 @@ class get_partitions_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter786 in self.success:
-        iter786.write(oprot)
+      for iter800 in self.success:
+        iter800.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19644,11 +19899,11 @@ class get_part_specs_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype790, _size787) = iprot.readListBegin()
-          for _i791 in xrange(_size787):
-            _elem792 = PartitionSpec()
-            _elem792.read(iprot)
-            self.success.append(_elem792)
+          (_etype804, _size801) = iprot.readListBegin()
+          for _i805 in xrange(_size801):
+            _elem806 = PartitionSpec()
+            _elem806.read(iprot)
+            self.success.append(_elem806)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19677,8 +19932,8 @@ class get_part_specs_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter793 in self.success:
-        iter793.write(oprot)
+      for iter807 in self.success:
+        iter807.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19915,10 +20170,10 @@ class get_partitions_by_names_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.names = []
-          (_etype797, _size794) = iprot.readListBegin()
-          for _i798 in xrange(_size794):
-            _elem799 = iprot.readString()
-            self.names.append(_elem799)
+          (_etype811, _size808) = iprot.readListBegin()
+          for _i812 in xrange(_size808):
+            _elem813 = iprot.readString()
+            self.names.append(_elem813)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19943,8 +20198,8 @@ class get_partitions_by_names_args:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter800 in self.names:
-        oprot.writeString(iter800)
+      for iter814 in self.names:
+        oprot.writeString(iter814)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20003,11 +20258,11 @@ class get_partitions_by_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype804, _size801) = iprot.readListBegin()
-          for _i805 in xrange(_size801):
-            _elem806 = Partition()
-            _elem806.read(iprot)
-            self.success.append(_elem806)
+          (_etype818, _size815) = iprot.readListBegin()
+          for _i819 in xrange(_size815):
+            _elem820 = Partition()
+            _elem820.read(iprot)
+            self.success.append(_elem820)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20036,8 +20291,8 @@ class get_partitions_by_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter807 in self.success:
-        iter807.write(oprot)
+      for iter821 in self.success:
+        iter821.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20287,11 +20542,11 @@ class alter_partitions_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype811, _size808) = iprot.readListBegin()
-          for _i812 in xrange(_size808):
-            _elem813 = Partition()
-            _elem813.read(iprot)
-            self.new_parts.append(_elem813)
+          (_etype825, _size822) = iprot.readListBegin()
+          for _i826 in xrange(_size822):
+            _elem827 = Partition()
+            _elem827.read(iprot)
+            self.new_parts.append(_elem827)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20316,8 +20571,8 @@ class alter_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter814 in self.new_parts:
-        iter814.write(oprot)
+      for iter828 in self.new_parts:
+        iter828.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20656,10 +20911,10 @@ class rename_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype818, _size815) = iprot.readListBegin()
-          for _i819 in xrange(_size815):
-            _elem820 = iprot.readString()
-            self.part_vals.append(_elem820)
+          (_etype832, _size829) = iprot.readListBegin()
+          for _i833 in xrange(_size829):
+            _elem834 = iprot.readString()
+            self.part_vals.append(_elem834)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20690,8 +20945,8 @@ class rename_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter821 in self.part_vals:
-        oprot.writeString(iter821)
+      for iter835 in self.part_vals:
+        oprot.writeString(iter835)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -20833,10 +21088,10 @@ class partition_name_has_valid_characters_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype825, _size822) = iprot.readListBegin()
-          for _i826 in xrange(_size822):
-            _elem827 = iprot.readString()
-            self.part_vals.append(_elem827)
+          (_etype839, _size836) = iprot.readListBegin()
+          for _i840 in xrange(_size836):
+            _elem841 = iprot.readString()
+            self.part_vals.append(_elem841)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20858,8 +21113,8 @@ class partition_name_has_valid_characters_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter828 in self.part_vals:
-        oprot.writeString(iter828)
+      for iter842 in self.part_vals:
+        oprot.writeString(iter842)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -21217,10 +21472,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype832, _size829) = iprot.readListBegin()
-          for _i833 in xrange(_size829):
-            _elem834 = iprot.readString()
-            self.success.append(_elem834)
+          (_etype846, _size843) = iprot.readListBegin()
+          for _i847 in xrange(_size843):
+            _elem848 = iprot.readString()
+            self.success.append(_elem848)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21243,8 +21498,8 @@ class partition_name_to_vals_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter835 in self.success:
-        oprot.writeString(iter835)
+      for iter849 in self.success:
+        oprot.writeString(iter849)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21368,11 +21623,11 @@ class partition_name_to_spec_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype837, _vtype838, _size836 ) = iprot.readMapBegin()
-          for _i840 in xrange(_size836):
-            _key841 = iprot.readString()
-            _val842 = iprot.readString()
-            self.success[_key841] = _val842
+          (_ktype851, _vtype852, _size850 ) = iprot.readMapBegin()
+          for _i854 in xrange(_size850):
+            _key855 = iprot.readString()
+            _val856 = iprot.readString()
+            self.success[_key855] = _val856
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -21395,9 +21650,9 @@ class partition_name_to_spec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter843,viter844 in self.success.items():
-        oprot.writeString(kiter843)
-        oprot.writeString(viter844)
+      for kiter857,viter858 in self.success.items():
+        oprot.writeString(kiter857)
+        oprot.writeString(viter858)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21473,11 +21728,11 @@ class markPartitionForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype846, _vtype847, _size845 ) = iprot.readMapBegin()
-          for _i849 in xrange(_size845):
-            _key850 = iprot.readString()
-            _val851 = iprot.readString()
-            self.part_vals[_key850] = _val851
+          (_ktype860, _vtype861, _size859 ) = iprot.readMapBegin()
+          for _i863 in xrange(_size859):
+            _key864 = iprot.readString()
+            _val865 = iprot.readString()
+            self.part_vals[_key864] = _val865
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -21507,9 +21762,9 @@ class markPartitionForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter852,viter853 in self.part_vals.items():
-        oprot.writeString(kiter852)
-        oprot.writeString(viter853)
+      for kiter866,viter867 in self.part_vals.items():
+        oprot.writeString(kiter866)
+        oprot.writeString(viter867)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -21723,11 +21978,11 @@ class isPartitionMarkedForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype855, _vtype856, _size854 ) = iprot.readMapBegin()
-          for _i858 in xrange(_size854):
-            _key859 = iprot.readString()
-            _val860 = iprot.readString()
-            self.part_vals[_key859] = _val860
+          (_ktype869, _vtype870, _size868 ) = iprot.readMapBegin()
+          for _i872 in xrange(_size868):
+            _key873 = iprot.readString()
+            _val874 = iprot.readString()
+            self.part_vals[_key873] = _val874
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -21757,9 +22012,9 @@ class isPartitionMarkedForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter861,viter862 in self.part_vals.items():
-        oprot.writeString(kiter861)
-        oprot.writeString(viter862)
+      for kiter875,viter876 in self.part_vals.items():
+        oprot.writeString(kiter875)
+        oprot.writeString(viter876)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -22814,11 +23069,11 @@ class get_indexes_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype866, _size863) = iprot.readListBegin()
-          for _i867 in xrange(_size863):
-            _elem868 = Index()
-            _elem868.read(iprot)
-            self.success.append(_elem868)
+          (_etype880, _size877) = iprot.readListBegin()
+          for _i881 in xrange(_size877):
+            _elem882 = Index()
+            _elem882.read(iprot)
+            self.success.append(_elem882)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22847,8 +23102,8 @@ class get_indexes_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter869 in self.success:
-        iter869.write(oprot)
+      for iter883 in self.success:
+        iter883.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23003,10 +23258,10 @@ class get_index_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype873, _size870) = iprot.readListBegin()
-          for _i874 in xrange(_size870):
-            _elem875 = iprot.readString()
-            self.success.append(_elem875)
+          (_etype887, _size884) = iprot.readListBegin()
+          for _i888 in xrange(_size884):
+            _elem889 = iprot.readString()
+            self.success.append(_elem889)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23029,8 +23284,8 @@ class get_index_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter876 in self.success:
-        oprot.writeString(iter876)
+      for iter890 in self.success:
+        oprot.writeString(iter890)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -25578,10 +25833,10 @@ class get_functions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype880, _size877) = iprot.readListBegin()
-          for _i881 in xrange(_size877):
-            _elem882 = iprot.readString()
-            self.success.append(_elem882)
+          (_etype894, _size891) = iprot.readListBegin()
+          for _i895 in xrange(_size891):
+            _elem896 = iprot.readString()
+            self.success.append(_elem896)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25604,8 +25859,8 @@ class get_functions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter883 in self.success:
-        oprot.writeString(iter883)
+      for iter897 in self.success:
+        oprot.writeString(iter897)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -26293,10 +26548,10 @@ class get_role_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype887, _size884) = iprot.readListBegin()
-          for _i888 in xrange(_size884):
-            _elem889 = iprot.readString()
-            self.success.append(_elem889)
+          (_etype901, _size898) = iprot.readListBegin()
+          for _i902 in xrange(_size898):
+            _elem903 = iprot.readString()
+            self.success.append(_elem903)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26319,8 +26574,8 @@ class get_role_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter890 in self.success:
-        oprot.writeString(iter890)
+      for iter904 in self.success:
+        oprot.writeString(iter904)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -26834,11 +27089,11 @@ class list_roles_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype894, _size891) = iprot.readListBegin()
-          for _i895 in xrange(_size891):
-            _elem896 = Role()
-            _elem896.read(iprot)
-            self.success.append(_elem896)
+          (_etype908, _size905) = iprot.readListBegin()
+          for _i909 in xrange(_size905):
+            _elem910 = Role()
+            _elem910.read(iprot)
+            self.success.append(_elem910)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26861,8 +27116,8 @@ class list_roles_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter897 in self.success:
-        iter897.write(oprot)
+      for iter911 in self.success:
+        iter911.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27371,10 +27626,10 @@ class get_privilege_set_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype901, _size898) = iprot.readListBegin()
-          for _i902 in xrange(_size898):
-            _elem903 = iprot.readString()
-            self.group_names.append(_elem903)
+          (_etype915, _size912) = iprot.readListBegin()
+          for _i916 in xrange(_size912):
+            _elem917 = iprot.readString()
+            self.group_names.append(_elem917)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27399,8 +27654,8 @@ class get_privilege_set_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter904 in self.group_names:
-        oprot.writeString(iter904)
+      for iter918 in self.group_names:
+        oprot.writeString(iter918)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -27627,11 +27882,11 @@ class list_privileges_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype908, _size905) = iprot.readListBegin()
-          for _i909 in xrange(_size905):
-            _elem910 = HiveObjectPrivilege()
-            _elem910.read(iprot)
-            self.success.append(_elem910)
+          (_etype922, _size919) = iprot.readListBegin()
+          for _i923 in xrange(_size919):
+            _elem924 = HiveObjectPrivilege()
+            _elem924.read(iprot)
+            self.success.append(_elem924)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27654,8 +27909,8 @@ class list_privileges_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter911 in self.success:
-        iter911.write(oprot)
+      for iter925 in self.success:
+        iter925.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28153,10 +28408,10 @@ class set_ugi_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype915, _size912) = iprot.readListBegin()
-          for _i916 in xrange(_size912):
-            _elem917 = iprot.readString()
-            self.group_names.append(_elem917)
+          (_etype929, _size926) = iprot.readListBegin()
+          for _i930 in xrange(_size926):
+            _elem931 = iprot.readString()
+            self.group_names.append(_elem931)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28177,8 +28432,8 @@ class set_ugi_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter918 in self.group_names:
-        oprot.writeString(iter918)
+      for iter932 in self.group_names:
+        oprot.writeString(iter932)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -28233,10 +28488,10 @@ class set_ugi_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype922, _size919) = iprot.readListBegin()
-          for _i923 in xrange(_size919):
-            _elem924 = iprot.readString()
-            self.success.append(_elem924)
+          (_etype936, _size933) = iprot.readListBegin()
+          for _i937 in xrange(_size933):
+            _elem938 = iprot.readString()
+            self.success.append(_elem938)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28259,8 +28514,8 @@ class set_ugi_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter925 in self.success:
-        oprot.writeString(iter925)
+      for iter939 in self.success:
+        oprot.writeString(iter939)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 8940dff..ba525ed 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -10554,6 +10554,116 @@ class GetAllFunctionsResponse:
   def __ne__(self, other):
     return not (self == other)
 
+class TableMeta:
+  """
+  Attributes:
+   - dbName
+   - tableName
+   - tableType
+   - comments
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'dbName', None, None, ), # 1
+    (2, TType.STRING, 'tableName', None, None, ), # 2
+    (3, TType.STRING, 'tableType', None, None, ), # 3
+    (4, TType.STRING, 'comments', None, None, ), # 4
+  )
+
+  def __init__(self, dbName=None, tableName=None, tableType=None, comments=None,):
+    self.dbName = dbName
+    self.tableName = tableName
+    self.tableType = tableType
+    self.comments = comments
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.dbName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tableName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.tableType = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRING:
+          self.comments = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('TableMeta')
+    if self.dbName is not None:
+      oprot.writeFieldBegin('dbName', TType.STRING, 1)
+      oprot.writeString(self.dbName)
+      oprot.writeFieldEnd()
+    if self.tableName is not None:
+      oprot.writeFieldBegin('tableName', TType.STRING, 2)
+      oprot.writeString(self.tableName)
+      oprot.writeFieldEnd()
+    if self.tableType is not None:
+      oprot.writeFieldBegin('tableType', TType.STRING, 3)
+      oprot.writeString(self.tableType)
+      oprot.writeFieldEnd()
+    if self.comments is not None:
+      oprot.writeFieldBegin('comments', TType.STRING, 4)
+      oprot.writeString(self.comments)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.dbName is None:
+      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
+    if self.tableName is None:
+      raise TProtocol.TProtocolException(message='Required field tableName is unset!')
+    if self.tableType is None:
+      raise TProtocol.TProtocolException(message='Required field tableType is unset!')
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.dbName)
+    value = (value * 31) ^ hash(self.tableName)
+    value = (value * 31) ^ hash(self.tableType)
+    value = (value * 31) ^ hash(self.comments)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class MetaException(TException):
   """
   Attributes:

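For orientation, a minimal sketch of how the new struct is consumed from the generated Java API (not part of this patch; the values are illustrative):

    import org.apache.hadoop.hive.metastore.api.TableMeta;

    public class TableMetaExample {
      public static void main(String[] args) {
        // dbName, tableName and tableType are required; comments is optional.
        TableMeta meta = new TableMeta("default", "src", "MANAGED_TABLE");
        meta.setComments("sample table");   // optional field, may stay unset
        System.out.println(meta.getDbName() + "." + meta.getTableName());
      }
    }
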
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 08b9b06..f943f2d 100644
--- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -2422,6 +2422,31 @@ class GetAllFunctionsResponse
   ::Thrift::Struct.generate_accessors self
 end
 
+class TableMeta
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  DBNAME = 1
+  TABLENAME = 2
+  TABLETYPE = 3
+  COMMENTS = 4
+
+  FIELDS = {
+    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+    TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+    TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'},
+    COMMENTS => {:type => ::Thrift::Types::STRING, :name => 'comments', :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableType is unset!') unless @tableType
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class MetaException < ::Thrift::Exception
   include ::Thrift::Struct, ::Thrift::Struct_Union
   def initialize(message=nil)

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index c613e4b..5fe54b5 100644
--- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -366,6 +366,22 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_tables failed: unknown result')
     end
 
+    def get_table_meta(db_patterns, tbl_patterns, tbl_types)
+      send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
+      return recv_get_table_meta()
+    end
+
+    def send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
+      send_message('get_table_meta', Get_table_meta_args, :db_patterns => db_patterns, :tbl_patterns => tbl_patterns, :tbl_types => tbl_types)
+    end
+
+    def recv_get_table_meta()
+      result = receive_message(Get_table_meta_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_meta failed: unknown result')
+    end
+
     def get_all_tables(db_name)
       send_get_all_tables(db_name)
       return recv_get_all_tables()
@@ -2471,6 +2487,17 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'get_tables', seqid)
     end
 
+    def process_get_table_meta(seqid, iprot, oprot)
+      args = read_args(iprot, Get_table_meta_args)
+      result = Get_table_meta_result.new()
+      begin
+        result.success = @handler.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types)
+      rescue ::MetaException => o1
+        result.o1 = o1
+      end
+      write_result(result, oprot, 'get_table_meta', seqid)
+    end
+
     def process_get_all_tables(seqid, iprot, oprot)
       args = read_args(iprot, Get_all_tables_args)
       result = Get_all_tables_result.new()
@@ -4613,6 +4640,44 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Get_table_meta_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    DB_PATTERNS = 1
+    TBL_PATTERNS = 2
+    TBL_TYPES = 3
+
+    FIELDS = {
+      DB_PATTERNS => {:type => ::Thrift::Types::STRING, :name => 'db_patterns'},
+      TBL_PATTERNS => {:type => ::Thrift::Types::STRING, :name => 'tbl_patterns'},
+      TBL_TYPES => {:type => ::Thrift::Types::LIST, :name => 'tbl_types', :element => {:type => ::Thrift::Types::STRING}}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_table_meta_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::TableMeta}},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_all_tables_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 2e9afaf..3c40d6e 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -110,7 +110,6 @@ import org.apache.thrift.transport.TTransportFactory;
 import javax.jdo.JDOException;
 
 import java.io.IOException;
-import java.lang.reflect.Field;
 import java.nio.ByteBuffer;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
@@ -1710,6 +1709,23 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       return t;
     }
 
+    @Override
+    public List<TableMeta> get_table_meta(String dbnames, String tblNames, List<String> tblTypes)
+        throws MetaException, NoSuchObjectException {
+      List<TableMeta> t = null;
+      startTableFunction("get_table_metas", dbnames, tblNames);
+      Exception ex = null;
+      try {
+        t = getMS().getTableMeta(dbnames, tblNames, tblTypes);
+      } catch (Exception e) {
+        ex = e;
+        throw newMetaException(e);
+      } finally {
+        endFunction("get_table_metas", t != null, ex);
+      }
+      return t;
+    }
+
     /**
      * Equivalent of get_table, but does not log audits and fire pre-event listener.
      * Meant to be used for calls made by other hive classes, that are not using the
@@ -5249,6 +5265,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     private static MetaException newMetaException(Exception e) {
+      if (e instanceof MetaException) {
+        return (MetaException)e;
+      }
       MetaException me = new MetaException(e.toString());
       me.initCause(e);
       return me;

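The new instanceof guard matters because handlers such as get_table_meta above funnel every failure through newMetaException; previously a MetaException raised by the raw store was wrapped a second time. A small sketch of the observable difference (not part of the patch):

    MetaException original = new MetaException("No such database: db1");
    // Before this change, newMetaException(original).getMessage() returned
    //   "MetaException(message:No such database: db1)"  -- wrapped twice,
    //   because the Thrift-generated toString() became the new message.
    // After, newMetaException(original) returns the same object, so the
    // original message and cause reach the client intact.
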
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index f86ec45..c5e7a5f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -131,6 +131,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
@@ -1314,6 +1315,37 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
     return null;
   }
 
+  @Override
+  public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+      throws MetaException {
+    try {
+      return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes));
+    } catch (Exception e) {
+      MetaStoreUtils.logAndThrowMetaException(e);
+    }
+    return null;
+  }
+
+  private List<TableMeta> filterNames(List<TableMeta> metas) throws MetaException {
+    Map<String, TableMeta> sources = new LinkedHashMap<>();
+    Map<String, List<String>> dbTables = new LinkedHashMap<>();
+    for (TableMeta meta : metas) {
+      sources.put(meta.getDbName() + "." + meta.getTableName(), meta);
+      List<String> tables = dbTables.get(meta.getDbName());
+      if (tables == null) {
+        dbTables.put(meta.getDbName(), tables = new ArrayList<String>());
+      }
+      tables.add(meta.getTableName());
+    }
+    List<TableMeta> filtered = new ArrayList<>();
+    for (Map.Entry<String, List<String>> entry : dbTables.entrySet()) {
+      for (String table : filterHook.filterTableNames(entry.getKey(), entry.getValue())) {
+        filtered.add(sources.get(entry.getKey() + "." + table));
+      }
+    }
+    return filtered;
+  }
+
   /** {@inheritDoc} */
   @Override
   public List<String> getAllTables(String dbname) throws MetaException {

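The regrouping in filterNames is needed because the authorization filter hook operates on table-name lists scoped to a single database; a hedged sketch of the effective behavior (table names are illustrative):

    // For metas covering db1.t1, db1.t2 and db2.t1, the helper issues one
    // hook call per database:
    List<String> visible = filterHook.filterTableNames("db1", Arrays.asList("t1", "t2"));
    // Only TableMeta entries whose names survive their database's call are
    // re-emitted, preserving the order the server returned them in.
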
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 9279cf5..aa96f77 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 import org.apache.hadoop.hive.metastore.api.TxnOpenException;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -161,6 +162,12 @@ public interface IMetaStoreClient {
       throws MetaException, TException, UnknownDBException;
 
   /**
+   * Fetch table metadata (db name, table name, type, comment) for the quick GetTablesOperation path.
+   */
+  List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+      throws MetaException, TException, UnknownDBException;
+
+  /**
    * Get the names of all tables in the specified database.
    * @param dbName
    * @return List of table names.

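A hedged usage sketch of the new interface method (the patterns and type filter are illustrative, and the client is assumed to be already connected):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.TableMeta;

    public class GetTableMetaExample {
      static void listTables(IMetaStoreClient client) throws Exception {
        // "db1|db2" is split on '|' server-side; "*" matches everything.
        List<TableMeta> metas =
            client.getTableMeta("db1|db2", "*", Arrays.asList("MANAGED_TABLE", "VIRTUAL_VIEW"));
        for (TableMeta m : metas) {
          System.out.println(m.getDbName() + "." + m.getTableName() + " (" + m.getTableType() + ")");
        }
      }
    }
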
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 9f2f5f4..803c6e7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -55,8 +55,6 @@ import javax.jdo.datastore.DataStoreCache;
 import javax.jdo.identity.IntIdentity;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.antlr.runtime.CommonTokenStream;
-import org.antlr.runtime.RecognitionException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configurable;
@@ -109,6 +107,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
@@ -143,12 +142,7 @@ import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
 import org.apache.hadoop.hive.metastore.model.MType;
 import org.apache.hadoop.hive.metastore.model.MVersionTable;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
-import org.apache.hadoop.hive.metastore.parser.FilterLexer;
-import org.apache.hadoop.hive.metastore.parser.FilterParser;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -729,6 +723,9 @@ public class ObjectStore implements RawStore, Configurable {
 
   @Override
   public List<String> getDatabases(String pattern) throws MetaException {
+    if (pattern == null || pattern.equals("*")) {
+      return getAllDatabases();
+    }
     boolean commited = false;
     List<String> databases = null;
     Query query = null;
@@ -770,7 +767,28 @@ public class ObjectStore implements RawStore, Configurable {
 
   @Override
   public List<String> getAllDatabases() throws MetaException {
-    return getDatabases(".*");
+    boolean commited = false;
+    List<String> databases = null;
+
+    String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MDatabase";
+    Query query = null;
+
+    openTransaction();
+    try {
+      query = pm.newQuery(queryStr);
+      query.setResult("name");
+      databases = new ArrayList<String>((Collection<String>) query.execute());
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
+    }
+    Collections.sort(databases);
+    return databases;
   }
 
   private MType getMType(Type type) {
@@ -1050,6 +1068,84 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
+  public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+      throws MetaException {
+
+    boolean commited = false;
+    Query query = null;
+    List<TableMeta> metas = new ArrayList<TableMeta>();
+    try {
+      openTransaction();
+      // Take the pattern and split it on the | to get all the composing
+      // patterns
+      StringBuilder builder = new StringBuilder();
+      if (dbNames != null && !dbNames.equals("*")) {
+        appendPatternCondition(builder, "database.name", dbNames);
+      }
+      if (tableNames != null && !tableNames.equals("*")) {
+        appendPatternCondition(builder, "tableName", tableNames);
+      }
+      if (tableTypes != null && !tableTypes.isEmpty()) {
+        appendSimpleCondition(builder, "tableType", tableTypes.toArray(new String[0]));
+      }
+
+      query = pm.newQuery(MTable.class, builder.toString());
+      Collection<MTable> tables = (Collection<MTable>) query.execute();
+      for (MTable table : tables) {
+        TableMeta metaData = new TableMeta(
+            table.getDatabase().getName(), table.getTableName(), table.getTableType());
+        metaData.setComments(table.getParameters().get("comment"));
+        metas.add(metaData);
+      }
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
+    }
+    return metas;
+  }
+
+  private StringBuilder appendPatternCondition(StringBuilder builder,
+      String fieldName, String elements) {
+    elements = HiveStringUtils.normalizeIdentifier(elements);
+    return appendCondition(builder, fieldName, elements.split("\\|"), true);
+  }
+
+  private StringBuilder appendSimpleCondition(StringBuilder builder,
+      String fieldName, String[] elements) {
+    return appendCondition(builder, fieldName, elements, false);
+  }
+
+  private StringBuilder appendCondition(StringBuilder builder,
+      String fieldName, String[] elements, boolean pattern) {
+    if (builder.length() > 0) {
+      builder.append(" && ");
+    }
+    builder.append(" (");
+    int length = builder.length();
+    for (String element : elements) {
+      if (pattern) {
+        element = "(?i)" + element.replaceAll("\\*", ".*");
+      }
+      if (builder.length() > length) {
+        builder.append(" || ");
+      }
+      builder.append(fieldName);
+      if (pattern) {
+        builder.append(".matches(\"").append(element).append("\")");
+      } else {
+        builder.append(" == \"").append(element).append("\"");
+      }
+    }
+    builder.append(" )");
+    return builder;
+  }
+
+  @Override
   public List<String> getAllTables(String dbName) throws MetaException {
     return getTables(dbName, ".*");
   }

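To make the filter construction concrete, a sketch of what the builder produces for a two-database pattern plus a type filter (not part of the patch; spacing approximate):

    StringBuilder builder = new StringBuilder();
    appendPatternCondition(builder, "database.name", "db1|db2");
    appendSimpleCondition(builder, "tableType", new String[] {"MANAGED_TABLE"});
    // builder now holds the JDOQL filter:
    //   (database.name.matches("(?i)db1") || database.name.matches("(?i)db2") )
    //    &&  (tableType == "MANAGED_TABLE" )
    // '*' in a pattern is rewritten to ".*" first, and matching is made
    // case-insensitive by the "(?i)" prefix.
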
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 4aa17a5..5b36b03 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
@@ -151,6 +152,9 @@ public interface RawStore extends Configurable {
   public List<String> getTables(String dbName, String pattern)
       throws MetaException;
 
+  public List<TableMeta> getTableMeta(
+      String dbNames, String tableNames, List<String> tableTypes) throws MetaException;
+
   /**
    * @param dbname
    *        The name of the database from which to retrieve the tables

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 5cc7c30..98e6c75 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult;
 import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hive.common.util.HiveStringUtils;
@@ -487,10 +487,7 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      List<Table> tables = getHBase().scanTables(HiveStringUtils.normalizeIdentifier(dbName),
-          pattern==null?null:HiveStringUtils.normalizeIdentifier(likeToRegex(pattern)));
-      List<String> tableNames = new ArrayList<String>(tables.size());
-      for (Table table : tables) tableNames.add(table.getTableName());
+      List<String> tableNames = getTableNamesInTx(dbName, pattern);
       commit = true;
       return tableNames;
     } catch (IOException e) {
@@ -501,6 +498,41 @@ public class HBaseStore implements RawStore {
     }
   }
 
+  private List<String> getTableNamesInTx(String dbName, String pattern) throws IOException {
+    List<Table> tables = getHBase().scanTables(HiveStringUtils.normalizeIdentifier(dbName),
+        pattern==null?null:HiveStringUtils.normalizeIdentifier(likeToRegex(pattern)));
+    List<String> tableNames = new ArrayList<String>(tables.size());
+    for (Table table : tables) tableNames.add(table.getTableName());
+    return tableNames;
+  }
+
+  @Override
+  public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+      throws MetaException {
+    boolean commit = false;
+    openTransaction();
+    try {
+      List<TableMeta> metas = new ArrayList<>();
+      for (String dbName : getDatabases(dbNames)) {
+        for (Table table : getTableObjectsByName(dbName, getTableNamesInTx(dbName, tableNames))) {
+          if (tableTypes == null || tableTypes.contains(table.getTableType())) {
+            TableMeta metaData = new TableMeta(
+              table.getDbName(), table.getTableName(), table.getTableType());
+            metaData.setComments(table.getParameters().get("comment"));
+            metas.add(metaData);
+          }
+        }
+      }
+      commit = true;
+      return metas;
+    } catch (Exception e) {
+      LOG.error("Unable to get tables ", e);
+      throw new MetaException("Unable to get tables, " + e.getMessage());
+    } finally {
+      commitOrRoleBack(commit);
+    }
+  }
+
   @Override
   public List<Table> getTableObjectsByName(String dbname, List<String> tableNames) throws
       MetaException, UnknownDBException {
@@ -1660,7 +1692,7 @@ public class HBaseStore implements RawStore {
     openTransaction();
     try {
       List<ColumnStatistics> cs =
-          getHBase().getPartitionStatistics(dbName, tblName, partNames,  partVals, colNames);
+          getHBase().getPartitionStatistics(dbName, tblName, partNames, partVals, colNames);
       commit = true;
       return cs;
     } catch (IOException e) {


[47/55] [abbrv] hive git commit: HIVE-12309 : TableScan should use colStats when available for better data size estimate (Ashutosh Chauhan via Prasanth J)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/tez/llapdecider.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/llapdecider.q.out b/ql/src/test/results/clientpositive/tez/llapdecider.q.out
index 676a0e4..fd33181 100644
--- a/ql/src/test/results/clientpositive/tez/llapdecider.q.out
+++ b/ql/src/test/results/clientpositive/tez/llapdecider.q.out
@@ -20,11 +20,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count(value)
                       keys: key (type: string)
@@ -251,11 +251,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count(value)
                       keys: key (type: string)
@@ -324,7 +324,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -343,7 +343,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -412,7 +412,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -431,7 +431,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -503,7 +503,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -522,7 +522,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -593,7 +593,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -611,7 +611,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -680,7 +680,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -698,7 +698,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -767,7 +767,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -786,7 +786,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -856,7 +856,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -874,7 +874,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -943,7 +943,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -962,7 +962,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1039,7 +1039,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (UDFToInteger(key) > 1) (type: boolean)
                     Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1098,7 +1098,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (UDFToInteger(key) > 1) (type: boolean)
                     Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1155,7 +1155,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (UDFToInteger(GenericUDFTestGetJavaString(key)) > 1) (type: boolean)
                     Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE


[28/55] [abbrv] hive git commit: HIVE-12345 : Followup for HIVE-9013 : Hidden conf vars still visible through beeline (Sushanth Sowmyan, reviewed by Thejas Nair)

Posted by xu...@apache.org.
HIVE-12345 : Followup for HIVE-9013 : Hidden conf vars still visible through beeline (Sushanth Sowmyan, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6ba735f0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6ba735f0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6ba735f0

Branch: refs/heads/spark
Commit: 6ba735f0ba192e310b52b19dbf89bab5f5492b9a
Parents: 0d3a75d
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Fri Nov 6 09:27:28 2015 -0800
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Fri Nov 6 09:27:28 2015 -0800

----------------------------------------------------------------------
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java |  1 +
 .../test/java/org/apache/hive/jdbc/TestJdbcDriver2.java   | 10 +++++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6ba735f0/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 12276bf..7272ea4 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2911,6 +2911,7 @@ public class HiveConf extends Configuration {
     isSparkConfigUpdated = other.isSparkConfigUpdated;
     origProp = (Properties)other.origProp.clone();
     restrictList.addAll(other.restrictList);
+    hiddenSet.addAll(other.hiddenSet);
     modWhiteListPattern = other.modWhiteListPattern;
   }
 

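The one-line fix above closes the leak: HiveConf's copy constructor is used to build per-session configurations, and until now it dropped the hidden-variable set. A hedged sketch of the symptom (variable names are the real ConfVars keys):

    HiveConf base = new HiveConf();          // hive.conf.hidden.list includes
                                             // javax.jdo.option.ConnectionPassword
    HiveConf session = new HiveConf(base);   // copy constructor patched above
    // Before: session carried an empty hiddenSet, so "set -v" over
    // JDBC/Beeline printed the metastore password. After: the hidden set
    // survives the copy, as the TestJdbcDriver2 change below verifies.
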
http://git-wip-us.apache.org/repos/asf/hive/blob/6ba735f0/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index ced454f..2b3fdf1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -1854,7 +1854,15 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
     assertEquals(SET_COLUMN_NAME, md.getColumnLabel(1));
 
     //check if there is data in the resultset
-    assertTrue("Nothing returned by set -v", res.next());
+    int numLines = 0;
+    while (res.next()){
+      numLines++;
+      String rline = res.getString(1);
+      assertFalse("set output must not contain hidden variables such as the metastore password:"+rline,
+          rline.contains(HiveConf.ConfVars.METASTOREPWD.varname) && !(rline.contains(HiveConf.ConfVars.HIVE_CONF_HIDDEN_LIST.varname)));
+        // the only conf allowed to have the metastore pwd keyname is the hidden list configuration value
+    }
+    assertTrue("Nothing returned by set -v", numLines > 0);
 
     res.close();
     stmt.close();


[11/55] [abbrv] hive git commit: Revert inadvertant addition of HiveConf.java.orig file

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9ba2cdfd/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
deleted file mode 100644
index b214344..0000000
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
+++ /dev/null
@@ -1,3372 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.conf;
-
-import com.google.common.base.Joiner;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
-import org.apache.hadoop.hive.conf.Validator.PatternSet;
-import org.apache.hadoop.hive.conf.Validator.RangeValidator;
-import org.apache.hadoop.hive.conf.Validator.RatioValidator;
-import org.apache.hadoop.hive.conf.Validator.StringSet;
-import org.apache.hadoop.hive.conf.Validator.TimeValidator;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.hive.shims.Utils;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell;
-import org.apache.hive.common.HiveCompat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.security.auth.login.LoginException;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintStream;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Hive Configuration.
- */
-public class HiveConf extends Configuration {
-  protected String hiveJar;
-  protected Properties origProp;
-  protected String auxJars;
-  private static final Logger l4j = LoggerFactory.getLogger(HiveConf.class);
-  private static boolean loadMetastoreConfig = false;
-  private static boolean loadHiveServer2Config = false;
-  private static URL hiveDefaultURL = null;
-  private static URL hiveSiteURL = null;
-  private static URL hivemetastoreSiteUrl = null;
-  private static URL hiveServer2SiteUrl = null;
-
-  private static byte[] confVarByteArray = null;
-
-
-  private static final Map<String, ConfVars> vars = new HashMap<String, ConfVars>();
-  private static final Map<String, ConfVars> metaConfs = new HashMap<String, ConfVars>();
-  private final List<String> restrictList = new ArrayList<String>();
-  private final Set<String> hiddenSet = new HashSet<String>();
-
-  private Pattern modWhiteListPattern = null;
-  private volatile boolean isSparkConfigUpdated = false;
-  private static final int LOG_PREFIX_LENGTH = 64;
-
-  public boolean getSparkConfigUpdated() {
-    return isSparkConfigUpdated;
-  }
-
-  public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
-    this.isSparkConfigUpdated = isSparkConfigUpdated;
-  }
-
-  static {
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    if (classLoader == null) {
-      classLoader = HiveConf.class.getClassLoader();
-    }
-
-    hiveDefaultURL = classLoader.getResource("hive-default.xml");
-
-    // Look for hive-site.xml on the CLASSPATH and log its location if found.
-    hiveSiteURL = classLoader.getResource("hive-site.xml");
-    hivemetastoreSiteUrl = classLoader.getResource("hivemetastore-site.xml");
-    hiveServer2SiteUrl = classLoader.getResource("hiveserver2-site.xml");
-
-    for (ConfVars confVar : ConfVars.values()) {
-      vars.put(confVar.varname, confVar);
-    }
-  }
-
-  /**
-   * Metastore related options that the db is initialized against. When a conf
-   * var in this list is changed, the metastore instance for the CLI will
-   * be recreated so that the change will take effect.
-   */
-  public static final HiveConf.ConfVars[] metaVars = {
-      HiveConf.ConfVars.METASTOREWAREHOUSE,
-      HiveConf.ConfVars.METASTOREURIS,
-      HiveConf.ConfVars.METASTORE_SERVER_PORT,
-      HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
-      HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
-      HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
-      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
-      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME,
-      HiveConf.ConfVars.METASTOREPWD,
-      HiveConf.ConfVars.METASTORECONNECTURLHOOK,
-      HiveConf.ConfVars.METASTORECONNECTURLKEY,
-      HiveConf.ConfVars.METASTORESERVERMINTHREADS,
-      HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
-      HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
-      HiveConf.ConfVars.METASTORE_INT_ORIGINAL,
-      HiveConf.ConfVars.METASTORE_INT_ARCHIVED,
-      HiveConf.ConfVars.METASTORE_INT_EXTRACTED,
-      HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE,
-      HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL,
-      HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,
-      HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES,
-      HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE,
-      HiveConf.ConfVars.METASTORE_VALIDATE_TABLES,
-      HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS,
-      HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS,
-      HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE,
-      HiveConf.ConfVars.METASTORE_AUTO_CREATE_SCHEMA,
-      HiveConf.ConfVars.METASTORE_AUTO_START_MECHANISM_MODE,
-      HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION,
-      HiveConf.ConfVars.METASTORE_CACHE_LEVEL2,
-      HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE,
-      HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY,
-      HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK,
-      HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
-      HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
-      HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
-      HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
-      HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
-      HiveConf.ConfVars.METASTORE_FILTER_HOOK,
-      HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
-      HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
-      HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
-      HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX,
-      HiveConf.ConfVars.METASTORE_INIT_HOOKS,
-      HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
-      HiveConf.ConfVars.HMSHANDLERATTEMPTS,
-      HiveConf.ConfVars.HMSHANDLERINTERVAL,
-      HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF,
-      HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
-      HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS,
-      HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
-      HiveConf.ConfVars.USERS_IN_ADMIN_ROLE,
-      HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
-      HiveConf.ConfVars.HIVE_TXN_MANAGER,
-      HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
-      HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
-      HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
-      HiveConf.ConfVars.METASTORE_FASTPATH,
-      HiveConf.ConfVars.METASTORE_HBASE_CATALOG_CACHE_SIZE,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_TIME_TO_LIVE,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_WRITER_WAIT,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_READER_WAIT,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_FULL,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_CLEAN_UNTIL,
-      HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL
-      };
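
A hedged sketch of how the metaVars list above is typically consumed; the
helper below is illustrative and not part of this file:

    // Returns true if any metastore-affecting var differs between two conf
    // snapshots, in which case the CLI would recreate its metastore client.
    public static boolean metaVarsChanged(HiveConf oldConf, HiveConf newConf) {
      for (HiveConf.ConfVars v : HiveConf.metaVars) {
        String oldVal = oldConf.get(v.varname);
        String newVal = newConf.get(v.varname);
        if (oldVal == null ? newVal != null : !oldVal.equals(newVal)) {
          return true;
        }
      }
      return false;
    }
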
-
-  /**
-   * User configurable Metastore vars
-   */
-  public static final HiveConf.ConfVars[] metaConfVars = {
-      HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL,
-      HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL,
-      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT
-  };
-
-  static {
-    for (ConfVars confVar : metaConfVars) {
-      metaConfs.put(confVar.varname, confVar);
-    }
-  }
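
A hedged sketch of the gate that the metaConfs map above enables; the method
name below is hypothetical:

    // Only vars registered in metaConfs may be overridden per-session
    // through the metastore's setMetaConf path.
    static void checkedMetaSet(HiveConf conf, String key, String value) {
      ConfVars v = metaConfs.get(key);  // map populated in the static block above
      if (v == null) {
        throw new IllegalArgumentException(key + " is not a user-settable metastore conf");
      }
      conf.set(key, value);
    }
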
-
-  /**
-   * dbVars are the parameters that can be set per database. If these
-   * parameters are set as a database property, when switching to that
-   * database, the HiveConf variable will be changed. The change of these
-   * parameters will effectively change the DFS and MapReduce clusters
-   * for different databases.
-   */
-  public static final HiveConf.ConfVars[] dbVars = {
-    HiveConf.ConfVars.HADOOPBIN,
-    HiveConf.ConfVars.METASTOREWAREHOUSE,
-    HiveConf.ConfVars.SCRATCHDIR
-  };
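
A hedged sketch of how the dbVars list above would be applied when switching
databases; the helper is illustrative (real Hive does this in its database
switch handling):

    // Overlay any dbVars the target database defines as properties onto the
    // session conf, so DFS/MapReduce settings follow the database.
    static void applyDbProperties(HiveConf conf, java.util.Map<String, String> dbProps) {
      for (HiveConf.ConfVars v : HiveConf.dbVars) {
        String val = dbProps.get(v.varname);
        if (val != null) {
          conf.set(v.varname, val);
        }
      }
    }
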
-
-  /**
-   * ConfVars.
-   *
-   * These are the default configuration properties for Hive. Each HiveConf
-   * object is initialized as follows:
-   *
-   * 1) Hadoop configuration properties are applied.
-   * 2) ConfVar properties with non-null values are overlaid.
-   * 3) hive-site.xml properties are overlaid.
-   *
-   * WARNING: think twice before adding any Hadoop configuration properties
-   * with non-null values to this list as they will override any values defined
-   * in the underlying Hadoop configuration.
-   */
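
Concretely, later sources win in the overlay order described above; a hedged
usage sketch (the path value is hypothetical):

    HiveConf conf = new HiveConf();   // 1) Hadoop props, 2) ConfVars defaults,
                                      // 3) hive-site.xml, applied in that order
    // Effective value after all three overlays:
    String scratch = conf.getVar(HiveConf.ConfVars.SCRATCHDIR);
    // A programmatic set wins over everything loaded at construction time:
    conf.setVar(HiveConf.ConfVars.SCRATCHDIR, "/tmp/alt_scratch");
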
-  public static enum ConfVars {
-    // QL execution stuff
-    SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
-    PLAN("hive.exec.plan", "", ""),
-    PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo",
-        "Query plan format serialization between client and task nodes. \n" +
-        "Two supported values are : kryo and javaXML. Kryo is default."),
-    STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
-        "Directory name that will be created inside table locations in order to support HDFS encryption. " +
-        "This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
-        "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
-    SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
-        "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
-        "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
-        "with ${hive.scratch.dir.permission}."),
-    LOCALSCRATCHDIR("hive.exec.local.scratchdir",
-        "${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
-        "Local scratch space for Hive jobs"),
-    DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
-        "${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources",
-        "Temporary local directory for added resources in the remote file system."),
-    SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700",
-        "The permission for the user specific scratch directories that get created."),
-    SUBMITVIACHILD("hive.exec.submitviachild", false, ""),
-    SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true,
-        "Determines whether local tasks (typically mapjoin hashtable generation phase) runs in \n" +
-        "separate JVM (true recommended) or not. \n" +
-        "Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."),
-    SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000,
-        "Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" +
-        "This prevents runaway scripts from filling logs partitions to capacity"),
-    ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false,
-        "When enabled, this option allows a user script to exit successfully without consuming \n" +
-        "all the data from the standard input."),
-    STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:",
-        "Streaming jobs that log to standard error with this prefix can log counter or status information."),
-    STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true,
-        "Enable consumption of status and counter messages for streaming jobs."),
-    COMPRESSRESULT("hive.exec.compress.output", false,
-        "This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. \n" +
-        "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
-    COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false,
-        "This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \n" +
-        "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
-    COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""),
-    COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""),
-    BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000),
-        "size per reducer.The default is 256Mb, i.e if the input size is 1G, it will use 4 reducers."),
-    MAXREDUCERS("hive.exec.reducers.max", 1009,
-        "max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" +
-        "negative, Hive will use this one as the max number of reducers when automatically determine number of reducers."),
-    PREEXECHOOKS("hive.exec.pre.hooks", "",
-        "Comma-separated list of pre-execution hooks to be invoked for each statement. \n" +
-        "A pre-execution hook is specified as the name of a Java class which implements the \n" +
-        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
-    POSTEXECHOOKS("hive.exec.post.hooks", "",
-        "Comma-separated list of post-execution hooks to be invoked for each statement. \n" +
-        "A post-execution hook is specified as the name of a Java class which implements the \n" +
-        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
-    ONFAILUREHOOKS("hive.exec.failure.hooks", "",
-        "Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
-        "An on-failure hook is specified as the name of Java class which implements the \n" +
-        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
-    QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
-        "Comma-separated list of hooks to be invoked for each query which can \n" +
-        "tranform the query before it's placed in the job.xml file. Must be a Java class which \n" +
-        "extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
-    CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
-        "Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
-        "A client stats publisher is specified as the name of a Java class which implements the \n" +
-        "org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."),
-    EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
-    EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8,
-        "How many jobs at most can be executed in parallel"),
-    HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true,
-        "Whether speculative execution for reducers should be turned on. "),
-    HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L,
-        "The interval with which to poll the JobTracker for the counters the running job. \n" +
-        "The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be."),
-    DYNAMICPARTITIONING("hive.exec.dynamic.partition", true,
-        "Whether or not to allow dynamic partitions in DML/DDL."),
-    DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict",
-        "In strict mode, the user must specify at least one static partition\n" +
-        "in case the user accidentally overwrites all partitions.\n" +
-        "In nonstrict mode all partitions are allowed to be dynamic."),
-    DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000,
-        "Maximum number of dynamic partitions allowed to be created in total."),
-    DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
-        "Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
-    MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
-        "Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
-    DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
-        "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
-        "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
-        "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
-    DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", ""),
-
-    // Whether to show a link to the most failed task + debugging tips
-    SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true,
-        "If a job fails, whether to provide a link in the CLI to the task with the\n" +
-        "most failures, along with debugging hints if applicable."),
-    JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true,
-        "Whether or not stack traces parsed from the task logs of a sampled failed task \n" +
-        "for each failed job should be stored in the SessionState"),
-    JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000, ""),
-    TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000, ""),
-    OUTPUT_FILE_EXTENSION("hive.output.file.extension", null,
-        "String used as a file extension for output files. \n" +
-        "If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
-
-    HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
-
-    HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
-        true),
-
-    LOCALMODEAUTO("hive.exec.mode.local.auto", false,
-        "Let Hive determine whether to run in local mode automatically"),
-    LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
-        "When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."),
-    LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
-        "When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."),
-
-    DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
-        "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/index/function"),
-
-    HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
-
-    HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100,
-        "maximum number of lines for footer user can define for a table file"),
-
-    HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES("hive.resultset.use.unique.column.names", true,
-        "Make column names unique in the result set by qualifying column names with table alias if needed.\n" +
-        "Table alias will be added to column names for queries of type \"select *\" or \n" +
-        "if query explicitly uses table alias \"select r1.x..\"."),
-
-    // Hadoop Configuration Properties
-    // Properties with null values are ignored and exist only for the purpose of giving us
-    // a symbolic name to reference in the Hive source code. Properties with non-null
-    // values will override any values set in the underlying Hadoop configuration.
-    HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true),
-    HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
-        "The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
-    HADOOPFS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPFS"), null, "", true),
-    HADOOPMAPFILENAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPFILENAME"), null, "", true),
-    HADOOPMAPREDINPUTDIR(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIR"), null, "", true),
-    HADOOPMAPREDINPUTDIRRECURSIVE(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIRRECURSIVE"), false, "", true),
-    MAPREDMAXSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), 256000000L, "", true),
-    MAPREDMINSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), 1L, "", true),
-    MAPREDMINSPLITSIZEPERNODE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERNODE"), 1L, "", true),
-    MAPREDMINSPLITSIZEPERRACK(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERRACK"), 1L, "", true),
-    // The number of reduce tasks per job. Hadoop sets this value to 1 by default
-    // By setting this property to -1, Hive will automatically determine the correct
-    // number of reducers.
-    HADOOPNUMREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPNUMREDUCERS"), -1, "", true),
-    HADOOPJOBNAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPJOBNAME"), null, "", true),
-    HADOOPSPECULATIVEEXECREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPSPECULATIVEEXECREDUCERS"), true, "", true),
-    MAPREDSETUPCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDSETUPCLEANUPNEEDED"), false, "", true),
-    MAPREDTASKCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDTASKCLEANUPNEEDED"), false, "", true),
-
-    // Metastore stuff. Be sure to update HiveConf.metaVars when you add something here!
-    METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
-        "location of default database for the warehouse"),
-    METASTOREURIS("hive.metastore.uris", "",
-        "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
-
-    METASTORE_FASTPATH("hive.metastore.fastpath", false,
-        "Used to avoid all of the proxies and object copies in the metastore.  Note, if this is " +
-            "set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " +
-            "undefined and most likely undesired behavior will result"),
-    METASTORE_HBASE_CATALOG_CACHE_SIZE("hive.metastore.hbase.catalog.cache.size", 50000, "Maximum number of " +
-        "objects we will place in the hbase metastore catalog cache.  The objects will be divided up by " +
-        "types that we need to cache."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.hbase.aggregate.stats.cache.size", 10000,
-        "Maximum number of aggregate stats nodes that we will place in the hbase metastore aggregate stats cache."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.hbase.aggregate.stats.max.partitions", 10000,
-        "Maximum number of partitions that are aggregated per cache node."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY("hive.metastore.hbase.aggregate.stats.false.positive.probability",
-        (float) 0.01, "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.hbase.aggregate.stats.max.variance", (float) 0.1,
-        "Maximum tolerable variance in number of partitions between a cached node and our request (default 10%)."),
-    METASTORE_HBASE_CACHE_TIME_TO_LIVE("hive.metastore.hbase.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds for a cached node to be active in the cache before they become stale."),
-    METASTORE_HBASE_CACHE_MAX_WRITER_WAIT("hive.metastore.hbase.cache.max.writer.wait", "5000ms", new TimeValidator(TimeUnit.MILLISECONDS),
-        "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
-    METASTORE_HBASE_CACHE_MAX_READER_WAIT("hive.metastore.hbase.cache.max.reader.wait", "1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
-         "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
-    METASTORE_HBASE_CACHE_MAX_FULL("hive.metastore.hbase.cache.max.full", (float) 0.9,
-         "Maximum cache full % after which the cache cleaner thread kicks in."),
-    METASTORE_HBASE_CACHE_CLEAN_UNTIL("hive.metastore.hbase.cache.clean.until", (float) 0.8,
-          "The cleaner thread cleans until cache reaches this % full size."),
-    METASTORE_HBASE_CONNECTION_CLASS("hive.metastore.hbase.connection.class",
-        "org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection",
-        "Class used to connection to HBase"),
-    METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES("hive.metastore.hbase.aggr.stats.cache.entries",
-        10000, "How many in stats objects to cache in memory"),
-    METASTORE_HBASE_AGGR_STATS_MEMORY_TTL("hive.metastore.hbase.aggr.stats.memory.ttl", "60s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds stats objects live in memory after they are read from HBase."),
-    METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY(
-        "hive.metastore.hbase.aggr.stats.invalidator.frequency", "5s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "How often the stats cache scans its HBase entries and looks for expired entries"),
-    METASTORE_HBASE_AGGR_STATS_HBASE_TTL("hive.metastore.hbase.aggr.stats.hbase.ttl", "604800s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds stats entries live in HBase cache after they are created.  They may be" +
-            " invalided by updates or partition drops before this.  Default is one week."),
-
-    METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
-        "Number of retries while opening a connection to metastore"),
-    METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
-        "Number of retries upon failure of Thrift metastore calls"),
-    METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
-    METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds for the client to wait between consecutive connection attempts"),
-    METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "MetaStore Client socket timeout in seconds"),
-    METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
-        "reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
-        "has an infinite lifetime."),
-    METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
-        "password to use against metastore database"),
-    METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",
-        "Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
-    METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true,
-        "Set this to true if multiple threads access metastore through JDO concurrently."),
-    METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
-        "jdbc:derby:;databaseName=metastore_db;create=true",
-        "JDBC connect string for a JDBC metastore"),
-    HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10,
-        "The number of times to retry a HMSHandler call if there were a connection error."),
-    HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms",
-        new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."),
-    HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
-        "Whether to force reloading of the HMSHandler configuration (including\n" +
-        "the connection URL, before the next metastore query that accesses the\n" +
-        "datastore. Once reloaded, this value is reset to false. Used for\n" +
-        "testing only."),
-    METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024,
-        "Maximum message size in bytes a HMS will accept."),
-    METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200,
-        "Minimum number of worker threads in the Thrift server's pool."),
-    METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000,
-        "Maximum number of worker threads in the Thrift server's pool."),
-    METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
-        "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
-
-    METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
-        "_INTERMEDIATE_ORIGINAL",
-        "Intermediate dir suffixes used for archiving. Not important what they\n" +
-        "are, as long as collisions are avoided"),
-    METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
-        "_INTERMEDIATE_ARCHIVED", ""),
-    METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
-        "_INTERMEDIATE_EXTRACTED", ""),
-    METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "",
-        "The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
-    METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
-        "hive-metastore/_HOST@EXAMPLE.COM",
-        "The service principal for the metastore Thrift server. \n" +
-        "The special string _HOST will be replaced automatically with the correct host name."),
-    METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false,
-        "If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
-    METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
-        "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
-    METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false,
-        "If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
-        "Setting it to true will break compatibility with older clients running TBinaryProtocol."),
-    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
-        "org.apache.hadoop.hive.thrift.MemoryTokenStore",
-        "The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
-    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
-        "hive.cluster.delegation.token.store.zookeeper.connectString", "",
-        "The ZooKeeper token store connect string. You can re-use the configuration value\n" +
-        "set in hive.zookeeper.quorum, by leaving this parameter unset."),
-    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
-        "hive.cluster.delegation.token.store.zookeeper.znode", "/hivedelegation",
-        "The root path for token store data. Note that this is used by both HiveServer2 and\n" +
-        "MetaStore to store delegation Token. One directory gets created for each of them.\n" +
-        "The final directory names would have the servername appended to it (HIVESERVER2,\n" +
-        "METASTORE)."),
-    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
-        "hive.cluster.delegation.token.store.zookeeper.acl", "",
-        "ACL for token store entries. Comma separated list of ACL entries. For example:\n" +
-        "sasl:hive/host1@MY.DOMAIN:cdrwa,sasl:hive/host2@MY.DOMAIN:cdrwa\n" +
-        "Defaults to all permissions for the hiveserver2/metastore process user."),
-    METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
-        "List of comma separated metastore object types that should be pinned in the cache"),
-    METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "BONECP",
-        "Specify connection pool library for datanucleus"),
-    METASTORE_VALIDATE_TABLES("datanucleus.validateTables", false,
-        "Validates existing schema against code. Turn this on if you want to verify existing schema"),
-    METASTORE_VALIDATE_COLUMNS("datanucleus.validateColumns", false,
-        "Validates existing schema against code. Turn this on if you want to verify existing schema"),
-    METASTORE_VALIDATE_CONSTRAINTS("datanucleus.validateConstraints", false,
-        "Validates existing schema against code. Turn this on if you want to verify existing schema"),
-    METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"),
-    METASTORE_AUTO_CREATE_SCHEMA("datanucleus.autoCreateSchema", true,
-        "creates necessary schema on a startup if one doesn't exist. set this to false, after creating it once"),
-    METASTORE_FIXED_DATASTORE("datanucleus.fixedDatastore", false, ""),
-    METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", false,
-        "Enforce metastore schema version consistency.\n" +
-        "True: Verify that version information stored in metastore matches with one from Hive jars.  Also disable automatic\n" +
-        "      schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
-        "      proper metastore schema migration. (Default)\n" +
-        "False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."),
-    METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION("hive.metastore.schema.verification.record.version", true,
-      "When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
-      " enabled the MS will be unusable."),
-    METASTORE_AUTO_START_MECHANISM_MODE("datanucleus.autoStartMechanismMode", "checked",
-        "throw exception if metadata tables are incorrect"),
-    METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed",
-        "Default transaction isolation level for identity generation."),
-    METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false,
-        "Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
-    METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""),
-    METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1",
-        "Name of the identifier factory to use when generating table/column names etc. \n" +
-        "'datanucleus1' is used for backward compatibility with DataNucleus v1"),
-    METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
-    METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
-        "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
-    METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
-        "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
-        "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
-        "but it may also cause higher memory requirement at the client side."),
-    METASTORE_BATCH_RETRIEVE_OBJECTS_MAX(
-        "hive.metastore.batch.retrieve.table.partition.max", 1000,
-        "Maximum number of objects that metastore internally retrieves in one batch."),
-
-    METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "",
-        "A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
-        "An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
-    METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
-        "List of comma separated listeners for metastore events."),
-    METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "", ""),
-    METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "time after which events will be removed from the database listener queue"),
-    METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
-        "Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
-        "for operations like drop-partition (disallow the drop-partition if the user in\n" +
-        "question doesn't have permissions to delete the corresponding directory\n" +
-        "on the storage)."),
-    METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Frequency at which timer task runs to purge expired events in metastore."),
-    METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Duration after which events expire from events table"),
-    METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true,
-        "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
-        "the client's reported user and group permissions. Note that this property must be set on \n" +
-        "both the client and server sides. Further note that its best effort. \n" +
-        "If client sets its to true and server sets it to false, client setting will be ignored."),
-    METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "",
-        "Partition names will be checked against this regex pattern and rejected if not matched."),
-
-    METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false,
-        "Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
-        "improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
-        "However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
-        "leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
-        "is also irrelevant."),
-    METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true,
-        "Whether the Hive metastore should try to use direct SQL queries instead of the\n" +
-        "DataNucleus for certain read paths. This can improve metastore performance when\n" +
-        "fetching many partitions or column statistics by orders of magnitude; however, it\n" +
-        "is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
-        "the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
-        "work for all queries on your datastore. If all SQL queries fail (for example, your\n" +
-        "metastore is backed by MongoDB), you might want to disable this to save the\n" +
-        "try-and-fall-back cost."),
-    METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0,
-        "Batch size for partition and other object retrieval from the underlying DB in direct\n" +
-        "SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
-        "that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
-        "may impede performance. -1 means no batching, 0 means automatic batching."),
-    METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true,
-        "Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
-        "modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
-        "select query has incorrect syntax or something similar inside a transaction, the\n" +
-        "entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
-        "should disable the usage of direct SQL inside transactions if that happens in your case."),
-    METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
-        "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
-        "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
-        "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
-        "pruning is the correct behaviour"),
-    METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
-        "hive.metastore.disallow.incompatible.col.type.changes", false,
-        "If true (default is false), ALTER TABLE operations which change the type of a\n" +
-        "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
-        "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
-        "datatypes can be converted from string to any type. The map is also serialized as\n" +
-        "a string, which can be read as a string as well. However, with any binary\n" +
-        "serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
-        "when subsequently trying to access old partitions.\n" +
-        "\n" +
-        "Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
-        "not blocked.\n" +
-        "\n" +
-        "See HIVE-4409 for more details."),
-
-    NEWTABLEDEFAULTPARA("hive.table.parameters.default", "",
-        "Default property values for newly created tables"),
-    DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "",
-        "Table Properties to copy over when executing a Create Table Like."),
-    METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
-        "Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" +
-        "This class is used to store and retrieval of raw metadata objects such as table, database"),
-    METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
-        "Driver class name for a JDBC metastore"),
-    METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
-        "org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
-        "class implementing the jdo persistence"),
-    METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
-        "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""),
-    METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true,
-        "Detaches all objects from session so that they can be used after transaction is committed"),
-    METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true,
-        "Reads outside of transactions"),
-    METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP",
-        "Username to use against metastore database"),
-    METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "",
-        "List of comma separated listeners for the end of metastore functions."),
-    METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "",
-        "List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
-        "* implies all the keys will get inherited."),
-    METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
-        "Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
-        + "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
-    FIRE_EVENTS_FOR_DML("hive.metastore.dml.events", false, "If true, the metastore will be asked" +
-        " to fire events for DML operations"),
-    METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS("hive.metastore.client.drop.partitions.using.expressions", true,
-        "Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, " +
-            "or drops partitions iteratively"),
-
-    METASTORE_AGGREGATE_STATS_CACHE_ENABLED("hive.metastore.aggregate.stats.cache.enabled", true,
-        "Whether aggregate stats caching is enabled or not."),
-    METASTORE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.aggregate.stats.cache.size", 10000,
-        "Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.aggregate.stats.cache.max.partitions", 10000,
-        "Maximum number of partitions that are aggregated per cache node."),
-    METASTORE_AGGREGATE_STATS_CACHE_FPP("hive.metastore.aggregate.stats.cache.fpp", (float) 0.01,
-        "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.aggregate.stats.cache.max.variance", (float) 0.01,
-        "Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
-    METASTORE_AGGREGATE_STATS_CACHE_TTL("hive.metastore.aggregate.stats.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds for a cached node to be active in the cache before they become stale."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms",
-        new TimeValidator(TimeUnit.MILLISECONDS),
-        "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT("hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms",
-        new TimeValidator(TimeUnit.MILLISECONDS),
-        "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL("hive.metastore.aggregate.stats.cache.max.full", (float) 0.9,
-        "Maximum cache full % after which the cache cleaner thread kicks in."),
-    METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL("hive.metastore.aggregate.stats.cache.clean.until", (float) 0.8,
-        "The cleaner thread cleans until cache reaches this % full size."),
-    METASTORE_METRICS("hive.metastore.metrics.enabled", false, "Enable metrics on the metastore."),
-
-    // Parameters for exporting metadata on table drop (requires the use of the)
-    // org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener
-    METADATA_EXPORT_LOCATION("hive.metadata.export.location", "",
-        "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
-        "it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
-        "metadata being exported to the current user's home directory on HDFS."),
-    MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true,
-        "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
-        "this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
-        "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
-
-    // CLI
-    CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""),
-    CLIPRINTCURRENTDB("hive.cli.print.current.db", false,
-        "Whether to include the current database in the Hive prompt."),
-    CLIPROMPT("hive.cli.prompt", "hive",
-        "Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
-        "Variable substitution will only be invoked at the Hive CLI startup."),
-    CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1,
-        "The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.\n" +
-        "If the value of this property is -1, then Hive will use the auto-detected terminal width."),
-
-    HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
-
-    // Things we log in the jobconf
-
-    // session identifier
-    HIVESESSIONID("hive.session.id", "", ""),
-    // whether session is running in silent mode or not
-    HIVESESSIONSILENT("hive.session.silent", false, ""),
-
-    HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
-        "Whether to log Hive query, query plan, runtime statistics etc."),
-
-    HIVEQUERYSTRING("hive.query.string", "",
-        "Query being executed (might be multiple per a session)"),
-
-    HIVEQUERYID("hive.query.id", "",
-        "ID for query being executed (might be multiple per a session)"),
-
-    HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),
-
-    // hive jar
-    HIVEJAR("hive.jar.path", "",
-        "The location of hive_cli.jar that is used when submitting jobs in a separate jvm."),
-    HIVEAUXJARS("hive.aux.jars.path", "",
-        "The location of the plugin jars that contain implementations of user defined functions and serdes."),
-
-    // reloadable jars
-    HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "",
-        "Jars can be renewed by executing reload command. And these jars can be "
-            + "used as the auxiliary classes like creating a UDF or SerDe."),
-
-    // hive added files and jars
-    HIVEADDEDFILES("hive.added.files.path", "", "This is an internal parameter."),
-    HIVEADDEDJARS("hive.added.jars.path", "", "This is an internal parameter."),
-    HIVEADDEDARCHIVES("hive.added.archives.path", "", "This is an internal parameter."),
-
-    HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true),
-
-    // for hive script operator
-    HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", "0s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "How long to run autoprogressor for the script/UDTF operators.\n" +
-        "Set to 0 for forever."),
-    HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false,
-        "Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" +
-        "to avoid the task getting killed because of inactivity.  Hive sends progress information when the script is \n" +
-        "outputting to stderr.  This option removes the need of periodically producing stderr messages, \n" +
-        "but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."),
-    HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID",
-        "Name of the environment variable that holds the unique script operator ID in the user's \n" +
-        "transform function (the custom mapper/reducer that the user has specified in the query)"),
-    HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false,
-        "Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"),
-    HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist",
-        "hive.txn.valid.txns,hive.script.operator.env.blacklist",
-        "Comma separated list of keys from the configuration file not to convert to environment " +
-        "variables when envoking the script operator"),
-    HIVEMAPREDMODE("hive.mapred.mode", "nonstrict",
-        "The mode in which the Hive operations are being performed. \n" +
-        "In strict mode, some risky queries are not allowed to run. They include:\n" +
-        "  Cartesian Product.\n" +
-        "  No partition being picked up for a query.\n" +
-        "  Comparing bigints and strings.\n" +
-        "  Comparing bigints and doubles.\n" +
-        "  Orderby without limit."),
-    HIVEALIAS("hive.alias", "", ""),
-    HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"),
-    HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"),
-    HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000,
-        "How many rows in the right-most join operand Hive should buffer before emitting the join result."),
-    HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
-        "How many rows in the joining tables (except the streaming table) should be cached in memory."),
-
-    // CBO related
-    HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
-    HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
-    HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on "
-                                 + "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),
-    HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"),
-    HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of transferring a byte over the network;"
-                                                                  + " expressed as multiple of CPU cost"),
-    HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;"
-                                                                             + " expressed as multiple of NETWORK cost"),
-    HIVE_CBO_COST_MODEL_LFS_READ("hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS;"
-                                                                           + " expressed as multiple of NETWORK cost"),
-    HIVE_CBO_COST_MODEL_HDFS_WRITE("hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS;"
-                                                                 + " expressed as multiple of Local FS write cost"),
-    HIVE_CBO_COST_MODEL_HDFS_READ("hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS;"
-                                                                 + " expressed as multiple of Local FS read cost"),
-    AGGR_JOIN_TRANSPOSE("hive.transpose.aggr.join", false, "push aggregates through join"),
-
-    // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
-    // needs to be removed by Hive 0.13. Also, do not change the default (see SMB operator)
-    HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
-
-    HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
-        "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
-        "because memory-optimized hashtable cannot be serialized."),
-    HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent",
-        (float) 0.5, "Probing space percentage of the optimized hashtable"),
-    HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid " +
-        "grace hash join as the join method for mapjoin. Tez only."),
-    HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
-        "hybrid grace hash join, how often (how many rows apart) we check if memory is full. " +
-        "This number should be power of 2."),
-    HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace " +
-        "hash join, the minimum write buffer size used by the optimized hashtable. Default is 512 KB."),
-    HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For " +
-        "hybrid grace hash join, the minimum number of partitions to create."),
-    HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024,
-        "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
-        "store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
-        "joins unnecessary memory will be allocated and then trimmed."),
-
-    HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000,
-        "How many rows with the same key value should be cached in memory per smb joined table."),
-    HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000,
-        "Number of rows after which size of the grouping keys/aggregation classes is performed"),
-    HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5,
-        "Portion of total memory to be used by map-side group aggregation hash table"),
-    HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3,
-        "Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"),
-    HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9,
-        "The max memory to be used by map-side group aggregation hash table.\n" +
-        "If the memory usage is higher than this number, force to flush data"),
-    HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.5,
-        "Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" +
-        "Set to 1 to make sure hash aggregation is never turned off."),
-    HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true,
-        "Whether to optimize a multi group by query to generate a single M/R job plan. If the multi group by query has \n" +
-        "common group by keys, it will be optimized to generate a single M/R job."),
-    HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", false,
-        "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
-        "the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" +
-        "is that it limits the number of mappers to the number of files."),
-    HIVE_MAP_GROUPBY_SORT_TESTMODE("hive.map.groupby.sorted.testmode", false,
-        "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
-        "the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan\n" +
-        "is not converted, but a query property is set to denote the same."),
-    HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false,
-        "Whether to enable using Column Position Alias in Group By or Order By"),
-    HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30,
-        "Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\n" +
-        "For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" +
-        "4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\n" +
-        "This can lead to explosion across map-reduce boundary if the cardinality of T is very high,\n" +
-        "and map-side aggregation does not do a very good job. \n" +
-        "\n" +
-        "This parameter decides if Hive should add an additional map-reduce job. If the grouping set\n" +
-        "cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
-        "assumption that the original group by will reduce the data size."),
-
-    // Max file size used to do a single copy (after that, distcp is used)
-    HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
-        "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories. " +
-        "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
-
-    // for hive udtf operator
-    HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
-        "Whether Hive should automatically send progress information to TaskTracker \n" +
-        "when using UDTF's to prevent the task getting killed because of inactivity.  Users should be cautious \n" +
-        "because this may prevent TaskTracker from killing tasks with infinite loops."),
-
-    HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC"),
-        "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
-    HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none",
-        new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
-        "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
-        "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
-        "for all tables."),
-    HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile"),
-        "Default file format for storing result of the query."),
-    HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
-
-    // default serde for rcfile
-    HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde",
-        "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe",
-        "The default SerDe Hive will use for the RCFile format"),
-
-    HIVEDEFAULTSERDE("hive.default.serde",
-        "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
-        "The default SerDe Hive will use for storage formats that do not specify a SerDe."),
-
-    SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema",
-        "org.apache.hadoop.hive.ql.io.orc.OrcSerde," +
-        "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," +
-        "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," +
-        "org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," +
-        "org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," +
-        "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," +
-        "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," +
-        "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe",
-        "SerDes retrieving schema from metastore. This is an internal parameter."),
-
-    HIVEHISTORYFILELOC("hive.querylog.location",
-        "${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
-        "Location of Hive run time structured log file"),
-
-    HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true,
-        "Whether to log the plan's progress every time a job's progress is checked.\n" +
-        "These logs are written to the location specified by hive.querylog.location"),
-
-    HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", "60000ms",
-        new TimeValidator(TimeUnit.MILLISECONDS),
-        "The interval to wait between logging the plan's progress.\n" +
-        "If there is a whole number percentage change in the progress of the mappers or the reducers,\n" +
-        "the progress is logged regardless of this value.\n" +
-        "The actual interval will be the ceiling of (this value divided by the value of\n" +
-        "hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval\n" +
-        "I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be\n" +
-        "logged less frequently than specified.\n" +
-        "This only has an effect if hive.querylog.enable.plan.progress is set to true."),
-
-    HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
-        "The default SerDe for transmitting input data to and reading output data from the user scripts. "),
-    HIVESCRIPTRECORDREADER("hive.script.recordreader",
-        "org.apache.hadoop.hive.ql.exec.TextRecordReader",
-        "The default record reader for reading data from the user scripts. "),
-    HIVESCRIPTRECORDWRITER("hive.script.recordwriter",
-        "org.apache.hadoop.hive.ql.exec.TextRecordWriter",
-        "The default record writer for writing data to the user scripts. "),
-    HIVESCRIPTESCAPE("hive.transform.escape.input", false,
-        "This adds an option to escape special chars (newlines, carriage returns and\n" +
-        "tabs) when they are passed to the user script. This is useful if the Hive tables\n" +
-        "can contain data that contains special characters."),
-    HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000,
-        "Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" +
-        "The last record before the end of stream can have less than hive.binary.record.max.length bytes"),
-
-    // HWI
-    HIVEHWILISTENHOST("hive.hwi.listen.host", "0.0.0.0", "This is the host address the Hive Web Interface will listen on"),
-    HIVEHWILISTENPORT("hive.hwi.listen.port", "9999", "This is the port the Hive Web Interface will listen on"),
-    HIVEHWIWARFILE("hive.hwi.war.file", "${env:HWI_WAR_FILE}",
-        "This sets the path to the HWI war file, relative to ${HIVE_HOME}. "),
-
-    HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"),
-
-    //small table file size
-    HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L,
-        "The threshold for the input file size of the small tables; if the file size is smaller \n" +
-        "than this threshold, it will try to convert the common join into map join"),
-
-    HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0,
-        "A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."),
-
-    // test mode in hive mode
-    HIVETESTMODE("hive.test.mode", false,
-        "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.",
-        false),
-    HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_",
-        "In test mode, specfies prefixes for the output table", false),
-    HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32,
-        "In test mode, specfies sampling frequency for table, which is not bucketed,\n" +
-        "For example, the following query:\n" +
-        "  INSERT OVERWRITE TABLE dest SELECT col1 from src\n" +
-        "would be converted to\n" +
-        "  INSERT OVERWRITE TABLE test_dest\n" +
-        "  SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false),
-    HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "",
-        "In test mode, specifies comma separated table names which would not apply sampling", false),
-    HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false),
-    HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false),
-    HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
-
-    HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
-        "Merge small files at the end of a map-only job"),
-    HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false,
-        "Merge small files at the end of a map-reduce job"),
-    HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"),
-    HIVEMERGESPARKFILES("hive.merge.sparkfiles", false, "Merge small files at the end of a Spark DAG Transformation"),
-    HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000),
-        "Size of merged files at the end of the job"),
-    HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000),
-        "When the average output file size of a job is less than this number, Hive will start an additional \n" +
-        "map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" +
-        "if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."),
-    HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
-    HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
-        "When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" +
-        "while writing a table with ORC file format, enabling this config will do stripe-level\n" +
-        "fast merge for small ORC files. Note that enabling this config will not honor the\n" +
-        "padding tolerance config (hive.exec.orc.block.padding.tolerance)."),
-
-    HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true,
-        "If this is set the header for RCFiles will simply be RCF.  If this is not\n" +
-        "set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" +
-        "by the input and output RCFile formats."),
-    HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""),
-
-    HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""),
-    HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""),
-    HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""),
-    HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""),   // 4M
-
-    PARQUET_MEMORY_POOL_RATIO("parquet.memory.pool.ratio", 0.5f,
-        "Maximum fraction of heap that can be used by Parquet file writers in one task.\n" +
-        "It is for avoiding OutOfMemory error in tasks. Work with Parquet 1.6.0 and above.\n" +
-        "This config parameter is defined in Parquet, so that it does not start with 'hive.'."),
-    HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", true,
-      "The current Hive implementation of Parquet stores timestamps in UTC; this flag allows skipping the conversion " +
-      "when reading Parquet files written by other tools"),
-    HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS("hive.int.timestamp.conversion.in.seconds", false,
-        "Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.\n" +
-        "Set this flag to true to interpret the value as seconds to be consistent with float/double." ),
-    HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f,
-        "Maximum fraction of heap that can be used by ORC file writers"),
-    HIVE_ORC_WRITE_FORMAT("hive.exec.orc.write.format", null,
-        "Define the version of the file to write. Possible values are 0.11 and 0.12.\n" +
-        "If this parameter is not defined, ORC will use the run length encoding (RLE)\n" +
-        "introduced in Hive 0.12. Any value other than 0.11 results in the 0.12 encoding."),
-    HIVE_ORC_DEFAULT_STRIPE_SIZE("hive.exec.orc.default.stripe.size",
-        64L * 1024 * 1024,
-        "Define the default ORC stripe size, in bytes."),
-    HIVE_ORC_DEFAULT_BLOCK_SIZE("hive.exec.orc.default.block.size", 256L * 1024 * 1024,
-        "Define the default file system block size for ORC files."),
-
-    HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD("hive.exec.orc.dictionary.key.size.threshold", 0.8f,
-        "If the number of keys in a dictionary is greater than this fraction of the total number of\n" +
-        "non-null rows, turn off dictionary encoding.  Use 1 to always use dictionary encoding."),
-    HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE("hive.exec.orc.default.row.index.stride", 10000,
-        "Define the default ORC index stride in number of rows. (Stride is the number of rows\n" +
-        "an index entry represents.)"),
-    HIVE_ORC_ROW_INDEX_STRIDE_DICTIONARY_CHECK("hive.orc.row.index.stride.dictionary.check", true,
-        "If enabled, the dictionary check will happen after the first row index stride (default 10000 rows);\n" +
-        "otherwise the dictionary check will happen before writing the first stripe. In both cases, the decision\n" +
-        "to use a dictionary or not will be retained thereafter."),
-    HIVE_ORC_DEFAULT_BUFFER_SIZE("hive.exec.orc.default.buffer.size", 256 * 1024,
-        "Define the default ORC buffer size, in bytes."),
-    HIVE_ORC_DEFAULT_BLOCK_PADDING("hive.exec.orc.default.block.padding", true,
-        "Define the default block padding, which pads stripes to the HDFS block boundaries."),
-    HIVE_ORC_BLOCK_PADDING_TOLERANCE("hive.exec.orc.block.padding.tolerance", 0.05f,
-        "Define the tolerance for block padding as a decimal fraction of stripe size (for\n" +
-        "example, the default value 0.05 is 5% of the stripe size). For the defaults of 64Mb\n" +
-        "ORC stripe and 256Mb HDFS blocks, the default block padding tolerance of 5% will\n" +
-        "reserve a maximum of 3.2Mb for padding within the 256Mb block. In that case, if the\n" +
-        "available size within the block is more than 3.2Mb, a new smaller stripe will be\n" +
-        "inserted to fit within that space. This will make sure that no stripe written will\n" +
-        "cross block boundaries and cause remote reads within a node local task."),
-    HIVE_ORC_DEFAULT_COMPRESS("hive.exec.orc.default.compress", "ZLIB", "Define the default compression codec for ORC file"),
-
-    HIVE_ORC_ENCODING_STRATEGY("hive.exec.orc.encoding.strategy", "SPEED", new StringSet("SPEED", "COMPRESSION"),
-        "Define the encoding strategy to use while writing data. Changing this will\n" +
-        "only affect the light weight encoding for integers. This flag will not\n" +
-        "change the compression level of higher level compression codec (like ZLIB)."),
-
-    HIVE_ORC_COMPRESSION_STRATEGY("hive.exec.orc.compression.strategy", "SPEED", new StringSet("SPEED", "COMPRESSION"),
-         "Define the compression strategy to use while writing data. \n" +
-         "This changes the compression level of higher level compression codec (like ZLIB)."),
-
-    HIVE_ORC_SPLIT_STRATEGY("hive.exec.orc.split.strategy", "HYBRID", new StringSet("HYBRID", "BI", "ETL"),
-        "This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation" +
-        " as opposed to query execution (split generation does not read or cache file footers)." +
-        " ETL strategy is used when spending little more time in split generation is acceptable" +
-        " (split generation reads and caches file footers). HYBRID chooses between the above strategies" +
-        " based on heuristics."),
-
-    HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", false,
-        "Whether to enable using file metadata cache in metastore for ORC file footers."),
-
-    HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
-        "If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
-        "data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
-    HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
-        "Include file ID in splits on file systems thaty support it."),
-    HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000,
-        "Max cache size for keeping meta info about orc splits cached in the client."),
-    HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
-        "How many threads orc should use to create splits in parallel."),
-    HIVE_ORC_SKIP_CORRUPT_DATA("hive.exec.orc.skip.corrupt.data", false,
-        "If ORC reader encounters corrupt data, this value will be used to determine\n" +
-        "whether to skip the corrupt data or throw exception. The default behavior is to throw exception."),
-
-    HIVE_ORC_ZEROCOPY("hive.exec.orc.zerocopy", false,
-        "Use zerocopy reads with ORC. (This requires Hadoop 2.3 or later.)"),
-
-    HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false,
-        "LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',\n" +
-        "'1', and '0' as extened, legal boolean literal, in addition to 'TRUE' and 'FALSE'.\n" +
-        "The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" +
-        "boolean literal."),
-
-    HIVESKEWJOIN("hive.optimize.skewjoin", false,
-        "Whether to enable skew join optimization. \n" +
-        "The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" +
-        "processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" +
-        "job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" +
-        "the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" +
-        "map-join."),
-    HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false,
-        "Whether to enable dynamically partitioned hash join optimization. \n" +
-        "This setting is also dependent on enabling hive.auto.convert.join"),
-    HIVECONVERTJOIN("hive.auto.convert.join", true,
-        "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"),
-    HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true,
-        "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" +
-        "If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" +
-        "specified size, the join is directly converted to a mapjoin (there is no conditional task)."),
-
-    HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size",
-        10000000L,
-        "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
-        "However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" +
-        "the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"),
-    HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false,
-        "For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" +
-        "filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" +
-        "Currently, this is not working with vectorization or tez execution engine."),
-    HIVESKEWJOINKEY("hive.skewjoin.key", 100000,
-        "Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in the join operator,\n" +
-        "we consider the key a skew join key. "),
-    HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000,
-        "Determine the number of map tasks used in the follow up map join job for a skew join.\n" +
-        "It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control."),
-    HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L,
-        "Determine the maximum number of map tasks used in the follow up map join job for a skew join by specifying \n" +
-        "the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control."),
-
-    HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000,
-        "Send a heartbeat after this interval - used by mapjoin and filter operators"),
-    HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L,
-        "When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."),
-    HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10,
-        "When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."),
-    HIVELIMITOPTENABLE("hive.limit.optimize.enable", false,
-        "Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."),
-    HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000,
-        "Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" +
-        "Insert queries are not restricted by this limit."),
-    HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", -1f,
-        "The max memory to be used for hash in RS operator for top K selection."),
-    HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1,
-        "This controls how many partitions can be scanned for each partitioned table.\n" +
-        "The default value \"-1\" means no limit."),
-
-    HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 1.0f,
-        "Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" +
-        " of the number of keys is divided by this value. If the value is 0, statistics are not used" +
-        "and hive.hashtable.initialCapacity is used instead."),
-    HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " +
-        "mapjoin hashtable if statistics are absent, or if hive.hashtable.stats.key.estimate.adjustment is set to 0"),
-    HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""),
-    HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55,
-        "This number means how much memory the local task can use to hold the key/value pairs in an in-memory hash table \n" +
-        "when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" +
-        "the local task will abort by itself. It means the data of the small table is too large to be held in memory."),
-    HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90,
-        "This number means how much memory the local task can use to hold the key/value pairs in an in-memory hash table. \n" +
-        "If the local task's memory usage is more than this number, the local task will abort by itself. \n" +
-        "It means the data of the small table is too large to be held in memory."),
-    HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000,
-        "After how many processed rows the memory usage should be checked"),
-
-    HIVEDEBUGLOCALTASK("hive.debug.localtask", false, ""),
-
-    HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat",
-        "The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."),
-    HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat",
-        "The default input format for tez. Tez groups splits in the AM."),
-
-    HIVETEZCONTAINERSIZE("hive.tez.container.size", -1,
-        "By default Tez will spawn containers of the size of a mapper. This can be used to override that."),
-    HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1,
-        "By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" +
-        "This can be used to override that."),
-    HIVETEZJAVAOPTS("hive.tez.java.opts", null,
-        "By default Tez will use the Java options from map tasks. This can be used to override that."),
-    HIVETEZLOGLEVEL("hive.tez.log.level", "INFO",
-        "The log level to use for tasks executing as part of the DAG.\n" +
-        "Used only if hive.tez.java.opts is used to configure Java options."),
-
-    HIVEENFORCEBUCKETING("hive.enforce.bucketing", false,
-        "Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced."),
-    HIVEENFORCESORTING("hive.enforce.sorting", false,
-        "Whether sorting is enforced. If true, while inserting into the table, sorting is enforced."),
-    HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
-        "If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing \n" +
-        "bucketing/sorting for queries of the form: \n" +
-        "insert overwrite table T2 select * from T1;\n" +
-        "where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
-    HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""),
-    HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false,
-        "If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"),
-    HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false,
-        "If the user asked for bucketed map-side join, and it cannot be performed, \n" +
-        "should the query fail or not ? For example, if the buckets in the tables being joined are\n" +
-        "not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
-        "query will fail if hive.enforce.bucketmapjoin is set to true."),
-
-    HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false,
-        "Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
-    HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR(
-        "hive.auto.convert.sortmerge.join.bigtable.selection.policy",
-        "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ",
-        "The policy to choose the big table for automatic conversion to sort-merge join. \n" +
-        "By default, the table with the largest partitions is assigned the big table. All policies are:\n" +
-        ". based on position of the table - the leftmost table is selected\n" +
-        "org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.\n" +
-        ". based on total size (all the partitions selected in the query) of the table \n" +
-        "org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoS

<TRUNCATED>

[36/55] [abbrv] hive git commit: HIVE-12312 : Excessive logging in PPD code (Carter Shanklin via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-12312 : Excessive logging in PPD code (Carter Shanklin via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/08e9d267
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/08e9d267
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/08e9d267

Branch: refs/heads/spark
Commit: 08e9d267c5efabb704d22e6106db3d06ef28b221
Parents: c29a685
Author: Carter Shanklin <ca...@hortonworks.com>
Authored: Sun Nov 8 08:11:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Nov 9 10:26:54 2015 -0800

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/08e9d267/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
index 3605484..1702628 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
@@ -709,7 +709,7 @@ public final class OpProcFactory {
      * @param ewi
      */
     protected void logExpr(Node nd, ExprWalkerInfo ewi) {
-      if (!LOG.isInfoEnabled()) return;
+      if (!LOG.isDebugEnabled()) return;
       for (Entry<String, List<ExprNodeDesc>> e : ewi.getFinalCandidates().entrySet()) {
         StringBuilder sb = new StringBuilder("Pushdown predicates of ").append(nd.getName())
             .append(" for alias ").append(e.getKey()).append(": ");
@@ -721,7 +721,7 @@ public final class OpProcFactory {
           isFirst = false;
           sb.append(n.getExprString());
         }
-        LOG.info(sb.toString());
+        LOG.debug(sb.toString());
       }
     }
 

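The change above is the standard guarded-logging idiom: the predicate strings are only assembled when the target level is actually enabled, and the guard and the log call name the same level. A minimal self-contained sketch of the same pattern (SLF4J-style logger; the class and method names here are illustrative, not code from this patch):

    import java.util.List;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingSketch.class);

      // Skip all string building unless DEBUG is enabled; keep the guard and
      // the log call at the same level so messages are never silently dropped.
      static void logPushdownCandidates(String operator, List<String> exprs) {
        if (!LOG.isDebugEnabled()) return;
        StringBuilder sb = new StringBuilder("Pushdown predicates of ").append(operator).append(": ");
        boolean first = true;
        for (String expr : exprs) {
          if (!first) {
            sb.append(", ");
          }
          first = false;
          sb.append(expr);
        }
        LOG.debug(sb.toString());
      }
    }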

[32/55] [abbrv] hive git commit: HIVE-12263 : Hive SchemaTool does not tolerate leading spaces in JDBC url (Chen Xin Yu via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-12263 : Hive SchemaTool does not tolerate leading spaces in JDBC url (Chen Xin Yu via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ab7794c0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ab7794c0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ab7794c0

Branch: refs/heads/spark
Commit: ab7794c0255cb2803eac7d9b92fb60567d7ac867
Parents: 97735ec
Author: Chen Xin Yu <qi...@126.com>
Authored: Sun Oct 25 20:19:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Nov 7 11:17:18 2015 -0800

----------------------------------------------------------------------
 beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ab7794c0/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java b/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
index 7e8cc67..181f0d2 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
@@ -86,7 +86,7 @@ public class HiveSchemaHelper {
     if (confVarStr == null || confVarStr.isEmpty()) {
       throw new IOException("Empty " + confVar.varname);
     }
-    return confVarStr;
+    return confVarStr.trim();
   }
 
   public interface NestedScriptParser {

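The one-line fix trims the configured value, so a JDBC URL pasted with leading or trailing whitespace still validates. A minimal sketch of the resulting behavior (hypothetical method and variable names, not the actual HiveSchemaHelper code):

    import java.io.IOException;

    public class ConfValueSketch {
      // Reject empty values, then normalize surrounding whitespace so inputs
      // like " jdbc:mysql://host/metastore" behave like the clean value.
      static String getValidConfVar(String varname, String value) throws IOException {
        if (value == null || value.isEmpty()) {
          throw new IOException("Empty " + varname);
        }
        return value.trim();
      }

      public static void main(String[] args) throws IOException {
        System.out.println(getValidConfVar("javax.jdo.option.ConnectionURL",
            " jdbc:mysql://localhost/metastore"));
      }
    }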

[55/55] [abbrv] hive git commit: HIVE-12390: Merge branch 'master' into spark

Posted by xu...@apache.org.
HIVE-12390: Merge branch 'master' into spark

Conflicts:
	ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
	ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
	spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cad0ea6a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cad0ea6a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cad0ea6a

Branch: refs/heads/spark
Commit: cad0ea6a98c71aa505e74e37b2558c50d13ba0f2
Parents: b02cd4a 206974a
Author: xzhang <xz...@xzdt>
Authored: Wed Nov 11 19:57:30 2015 -0800
Committer: xzhang <xz...@xzdt>
Committed: Wed Nov 11 19:57:30 2015 -0800

----------------------------------------------------------------------
 accumulo-handler/pom.xml                        |     4 -
 .../hadoop/hive/accumulo/LazyAccumuloRow.java   |     5 +-
 .../org/apache/hadoop/hive/accumulo/Utils.java  |     5 +-
 .../hive/accumulo/columns/ColumnMapper.java     |     5 +-
 .../accumulo/columns/ColumnMappingFactory.java  |     5 +-
 .../columns/HiveAccumuloColumnMapping.java      |     5 +-
 .../hive/accumulo/mr/HiveAccumuloSplit.java     |     5 +-
 .../predicate/AccumuloPredicateHandler.java     |     5 +-
 .../predicate/PrimitiveComparisonFilter.java    |     5 +-
 .../hive/accumulo/predicate/PushdownTuple.java  |     5 +-
 .../predicate/compare/StringCompare.java        |     3 -
 .../accumulo/serde/AccumuloRowSerializer.java   |     5 +-
 .../accumulo/serde/AccumuloSerDeParameters.java |     5 +-
 .../serde/CompositeAccumuloRowIdFactory.java    |     5 +-
 .../predicate/TestAccumuloPredicateHandler.java |     3 -
 .../serde/DelimitedAccumuloRowIdFactory.java    |     5 +-
 .../serde/FirstCharAccumuloCompositeRowId.java  |     5 +-
 .../hive/accumulo/serde/TestAccumuloSerDe.java  |     3 -
 beeline/pom.xml                                 |     5 -
 .../java/org/apache/hive/beeline/BeeLine.java   |    22 +-
 .../apache/hive/beeline/ClassNameCompleter.java |     6 +-
 .../apache/hive/beeline/HiveSchemaHelper.java   |     2 +-
 .../org/apache/hive/beeline/SQLCompleter.java   |     6 +-
 .../apache/hive/beeline/util/QFileClient.java   |     8 +-
 beeline/src/main/resources/beeline-log4j2.xml   |     4 +-
 .../hive/beeline/TestBeelineArgParsing.java     |    24 +-
 .../apache/hive/beeline/cli/TestHiveCli.java    |     6 +-
 cli/pom.xml                                     |     5 -
 .../org/apache/hadoop/hive/cli/CliDriver.java   |    18 +-
 .../hadoop/hive/cli/OptionsProcessor.java       |     6 +-
 common/pom.xml                                  |    10 -
 .../hadoop/hive/common/CallableWithNdc.java     |    44 -
 .../hadoop/hive/common/CompressionUtils.java    |    22 +-
 .../apache/hadoop/hive/common/FileUtils.java    |     8 +-
 .../hadoop/hive/common/JvmPauseMonitor.java     |    11 +-
 .../org/apache/hadoop/hive/common/LogUtils.java |     6 +-
 .../hadoop/hive/common/RunnableWithNdc.java     |    43 -
 .../apache/hadoop/hive/common/ServerUtils.java  |     6 +-
 .../common/io/NonSyncByteArrayOutputStream.java |     4 +
 .../common/jsonexplain/tez/TezJsonParser.java   |     8 +-
 .../hive/common/jsonexplain/tez/Vertex.java     |     9 +-
 .../metrics/metrics2/CodahaleMetrics.java       |    17 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    88 +-
 .../hadoop/hive/conf/SystemVariables.java       |     6 +-
 .../hadoop/hive/conf/VariableSubstitution.java  |     8 +-
 .../apache/hadoop/hive/ql/log/PerfLogger.java   |     8 +-
 .../java/org/apache/hive/common/HiveCompat.java |     6 +-
 .../hive/common/util/FixedSizedObjectPool.java  |     6 +-
 .../apache/hive/common/util/HashCodeUtil.java   |   132 +
 .../hive/common/util/HiveStringUtils.java       |     2 +-
 .../apache/hive/common/util/HiveTestUtils.java  |     8 +-
 .../hive/common/util/HiveVersionInfo.java       |     6 +-
 .../hive/common/util/ShutdownHookManager.java   |     6 +-
 .../common/util/TestFixedSizedObjectPool.java   |     9 +-
 contrib/pom.xml                                 |     5 -
 .../genericudf/example/GenericUDFDBOutput.java  |     8 +-
 .../hive/contrib/serde2/MultiDelimitSerDe.java  |     5 +-
 .../hadoop/hive/contrib/serde2/RegexSerDe.java  |     6 +-
 .../hive/contrib/serde2/TypedBytesSerDe.java    |     6 +-
 .../contrib/serde2/s3/S3LogDeserializer.java    |     6 +-
 errata.txt                                      |     2 +-
 hbase-handler/pom.xml                           |     5 -
 .../AbstractHBaseKeyPredicateDecomposer.java    |     8 +-
 .../hive/hbase/CompositeHBaseKeyFactory.java    |     8 +-
 .../hive/hbase/HBaseLazyObjectFactory.java      |    28 +
 .../apache/hadoop/hive/hbase/HBaseSerDe.java    |    11 +-
 .../hadoop/hive/hbase/HBaseSerDeHelper.java     |     8 +-
 .../hadoop/hive/hbase/HBaseStorageHandler.java  |     6 +-
 .../HBaseTableSnapshotInputFormatUtil.java      |     6 +-
 .../hive/hbase/HiveHBaseTableInputFormat.java   |     6 +-
 .../hive/hbase/HiveHBaseTableOutputFormat.java  |     6 +-
 .../hive/hbase/HiveHFileOutputFormat.java       |     6 +-
 .../src/test/queries/positive/hbase_queries.q   |     4 +-
 .../results/positive/external_table_ppd.q.out   |    16 +-
 .../positive/hbase_binary_storage_queries.q.out |    32 +-
 .../test/results/positive/hbase_queries.q.out   |    37 +-
 .../test/results/positive/hbase_timestamp.q.out |     6 +-
 .../positive/hbase_timestamp_format.q.out       |    12 +-
 .../org/apache/hive/hcatalog/cli/HCatCli.java   |     8 +-
 .../mapreduce/HCatBaseOutputFormat.java         |     2 -
 .../hive/hcatalog/mapreduce/SpecialCases.java   |    16 +-
 .../hive/hcatalog/cli/TestSemanticAnalysis.java |     1 +
 .../listener/DbNotificationListener.java        |     6 +-
 .../messaging/json/JSONMessageFactory.java      |     6 +-
 .../streaming/AbstractRecordWriter.java         |     6 +-
 .../hcatalog/streaming/ConnectionError.java     |     4 +
 .../streaming/DelimitedInputWriter.java         |     6 +-
 .../hive/hcatalog/streaming/HiveEndPoint.java   |    57 +-
 .../hive/hcatalog/streaming/InvalidTable.java   |     4 +-
 .../streaming/StreamingIntegrationTester.java   |     6 +-
 .../hive/hcatalog/streaming/TestStreaming.java  |    35 +-
 .../hive/hcatalog/api/TestHCatClient.java       |     2 +-
 .../hcatalog/api/repl/CommandTestUtils.java     |     6 +-
 .../api/repl/commands/TestCommands.java         |     6 +-
 .../hive/hcatalog/templeton/AppConfig.java      |     6 +-
 .../templeton/CatchallExceptionMapper.java      |     6 +-
 .../hcatalog/templeton/CompleteDelegator.java   |     6 +-
 .../hcatalog/templeton/DeleteDelegator.java     |     6 +-
 .../hcatalog/templeton/ExecServiceImpl.java     |     6 +-
 .../hive/hcatalog/templeton/HcatDelegator.java  |     6 +-
 .../hcatalog/templeton/LauncherDelegator.java   |     6 +-
 .../apache/hive/hcatalog/templeton/Main.java    |    10 +-
 .../hive/hcatalog/templeton/PigDelegator.java   |     6 +-
 .../hcatalog/templeton/ProxyUserSupport.java    |     6 +-
 .../hcatalog/templeton/SecureProxySupport.java  |     6 +-
 .../apache/hive/hcatalog/templeton/Server.java  |     6 +-
 .../hive/hcatalog/templeton/SqoopDelegator.java |     6 +-
 .../hcatalog/templeton/StatusDelegator.java     |     6 +-
 .../hcatalog/templeton/tool/HDFSCleanup.java    |     6 +-
 .../hcatalog/templeton/tool/HDFSStorage.java    |     6 +-
 .../hive/hcatalog/templeton/tool/JobState.java  |     6 +-
 .../templeton/tool/JobStateTracker.java         |     6 +-
 .../hcatalog/templeton/tool/LaunchMapper.java   |     8 +-
 .../hcatalog/templeton/tool/LogRetriever.java   |     6 +-
 .../templeton/tool/TempletonControllerJob.java  |     6 +-
 .../hcatalog/templeton/tool/TempletonUtils.java |     6 +-
 .../templeton/tool/TrivialExecService.java      |     6 +-
 .../templeton/tool/ZooKeeperCleanup.java        |     6 +-
 .../templeton/tool/ZooKeeperStorage.java        |     6 +-
 hplsql/pom.xml                                  |     5 -
 .../antlr4/org/apache/hive/hplsql/Hplsql.g4     |    83 +-
 .../main/java/org/apache/hive/hplsql/Cmp.java   |     8 +-
 .../main/java/org/apache/hive/hplsql/Conn.java  |    25 +-
 .../main/java/org/apache/hive/hplsql/Copy.java  |     9 +-
 .../main/java/org/apache/hive/hplsql/Exec.java  |   236 +-
 .../java/org/apache/hive/hplsql/Package.java    |   194 +
 .../main/java/org/apache/hive/hplsql/Scope.java |    15 +-
 .../main/java/org/apache/hive/hplsql/Stmt.java  |     2 +-
 .../main/java/org/apache/hive/hplsql/Var.java   |    56 +-
 .../apache/hive/hplsql/functions/Function.java  |    44 +-
 .../hive/hplsql/functions/FunctionOra.java      |    33 +-
 hplsql/src/main/resources/hplsql-site.xml       |     2 +-
 .../org/apache/hive/hplsql/TestHplsqlLocal.java |    40 +
 .../apache/hive/hplsql/TestHplsqlOffline.java   |     5 +
 hplsql/src/test/queries/local/bool.sql          |    14 +
 .../src/test/queries/local/create_package.sql   |    60 +
 .../src/test/queries/local/create_package2.sql  |    23 +
 hplsql/src/test/queries/local/datatypes.sql     |    20 +
 hplsql/src/test/queries/local/declare2.sql      |    13 +
 hplsql/src/test/queries/local/float.sql         |     4 +
 hplsql/src/test/queries/local/var_scope.sql     |    28 +
 hplsql/src/test/queries/local/var_scope2.sql    |    30 +
 .../test/queries/local/var_scope_include.sql    |     1 +
 .../src/test/queries/offline/insert_mysql.sql   |     2 +
 hplsql/src/test/results/local/bool.out.txt      |    12 +
 .../test/results/local/create_package.out.txt   |    47 +
 .../test/results/local/create_package2.out.txt  |    16 +
 hplsql/src/test/results/local/datatypes.out.txt |    27 +
 hplsql/src/test/results/local/declare2.out.txt  |     7 +
 hplsql/src/test/results/local/float.out.txt     |     6 +
 hplsql/src/test/results/local/var_scope.out.txt |    26 +
 .../src/test/results/local/var_scope2.out.txt   |    26 +
 .../test/results/offline/insert_mysql.out.txt   |     4 +
 hwi/pom.xml                                     |     5 -
 .../hadoop/hive/hwi/HWIContextListener.java     |     6 +-
 .../org/apache/hadoop/hive/hwi/HWIServer.java   |     8 +-
 .../apache/hadoop/hive/hwi/HWISessionItem.java  |     8 +-
 .../hadoop/hive/hwi/HWISessionManager.java      |     6 +-
 ...CustomNonSettableStructObjectInspector1.java |     8 +-
 .../api/TestHCatClientNotification.java         |     6 +-
 .../listener/TestDbNotificationListener.java    |     8 +-
 .../vectorization/AbstractExpression.java       |   150 +
 .../vectorization/VectorizationBench.java       |   506 -
 .../VectorizedArithmeticBench.java              |   112 +
 .../VectorizedComparisonBench.java              |   215 +
 .../vectorization/VectorizedLogicBench.java     |   147 +
 itests/hive-unit/pom.xml                        |    17 +-
 .../hive/metastore/TestHiveMetaStore.java       |     6 +-
 .../hive/metastore/TestHiveMetaStoreTxns.java   |     3 +-
 ...TestHiveMetaStoreWithEnvironmentContext.java |    11 +-
 .../hive/metastore/TestMetastoreVersion.java    |     6 +-
 .../metastore/hbase/HBaseIntegrationTests.java  |     6 +-
 .../TestHBaseAggrStatsCacheIntegration.java     |     6 +-
 .../hive/metastore/hbase/TestHBaseImport.java   |     6 +-
 .../metastore/hbase/TestHBaseMetastoreSql.java  |     6 +-
 .../metastore/hbase/TestHBaseSchemaTool.java    |   584 +
 .../metastore/hbase/TestHBaseSchemaTool2.java   |    61 +
 .../hbase/TestHBaseStoreIntegration.java        |     6 +-
 .../hbase/TestStorageDescriptorSharing.java     |     6 +-
 .../TestHiveAuthorizerCheckInvocation.java      |     6 +-
 .../hadoop/hive/thrift/TestDBTokenStore.java    |     2 +-
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   |   309 +-
 .../apache/hive/jdbc/TestServiceDiscovery.java  |   178 +
 .../hive/jdbc/cbo_rp_TestJdbcDriver2.java       |     6 +-
 itests/qtest-accumulo/pom.xml                   |     6 -
 itests/qtest-spark/pom.xml                      |     5 -
 itests/qtest/pom.xml                            |    11 +-
 .../test/resources/testconfiguration.properties |    13 +
 .../apache/hadoop/hive/serde2/TestSerDe.java    |     6 +-
 .../hive/udf/example/GenericUDFExampleAdd.java  |    48 +
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |    10 +-
 .../hive/ql/hooks/CheckColumnAccessHook.java    |     4 +-
 ...DummyHiveMetastoreAuthorizationProvider.java |     6 +-
 ...SQLStdHiveAuthorizationValidatorForTest.java |    47 +-
 .../hadoop/hive/ql/udf/UDFFileLookup.java       |     4 -
 .../hive/ql/udf/generic/GenericUDAFSumList.java |     6 +-
 jdbc/pom.xml                                    |    19 +-
 .../org/apache/hive/jdbc/HiveConnection.java    |   123 +-
 .../org/apache/hive/jdbc/HiveDataSource.java    |    24 +-
 .../apache/hive/jdbc/HiveDatabaseMetaData.java  |     4 +-
 .../java/org/apache/hive/jdbc/HiveDriver.java   |     6 +
 .../apache/hive/jdbc/HiveQueryResultSet.java    |     6 +-
 .../org/apache/hive/jdbc/HiveStatement.java     |    21 +-
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   |     8 +-
 .../hive/jdbc/ZooKeeperHiveClientHelper.java    |    14 +-
 .../org/apache/hive/jdbc/HiveStatementTest.java |    31 +
 llap-client/pom.xml                             |    11 +-
 .../hive/llap/registry/ServiceInstance.java     |    73 +
 .../hive/llap/registry/ServiceInstanceSet.java  |    57 +
 .../hive/llap/registry/ServiceRegistry.java     |    59 +
 .../registry/impl/LlapFixedRegistryImpl.java    |   223 +
 .../llap/registry/impl/LlapRegistryService.java |    87 +
 .../registry/impl/LlapYarnRegistryImpl.java     |   383 +
 llap-server/pom.xml                             |    11 -
 .../hadoop/hive/llap/cache/BuddyAllocator.java  |    91 +-
 .../hive/llap/cache/LowLevelCacheImpl.java      |     8 +-
 .../llap/cache/LowLevelCacheMemoryManager.java  |    12 +
 .../llap/cache/LowLevelFifoCachePolicy.java     |     3 -
 .../hadoop/hive/llap/cache/MemoryManager.java   |     1 +
 .../hive/llap/cli/LlapOptionsProcessor.java     |     6 +-
 .../hadoop/hive/llap/cli/LlapServiceDriver.java |     6 +-
 .../hadoop/hive/llap/daemon/HistoryLogger.java  |     5 +-
 .../hive/llap/daemon/impl/AMReporter.java       |     6 +-
 .../llap/daemon/impl/ContainerRunnerImpl.java   |     2 +-
 .../hive/llap/daemon/impl/LlapDaemon.java       |     2 +-
 .../impl/LlapDaemonProtocolServerImpl.java      |     6 +-
 .../llap/daemon/impl/TaskExecutorService.java   |    21 +-
 .../llap/daemon/impl/TaskRunnerCallable.java    |     6 +-
 .../llap/daemon/registry/ServiceInstance.java   |    73 -
 .../daemon/registry/ServiceInstanceSet.java     |    57 -
 .../llap/daemon/registry/ServiceRegistry.java   |    59 -
 .../registry/impl/LlapFixedRegistryImpl.java    |   222 -
 .../registry/impl/LlapRegistryService.java      |    86 -
 .../registry/impl/LlapYarnRegistryImpl.java     |   383 -
 .../daemon/services/impl/LlapWebServices.java   |     2 -
 .../hive/llap/io/api/impl/LlapInputFormat.java  |     5 +-
 .../hive/llap/io/api/impl/LlapIoImpl.java       |     9 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |    14 +-
 .../hive/llap/shufflehandler/DirWatcher.java    |     6 +-
 .../shufflehandler/FadvisedChunkedFile.java     |     6 +-
 .../llap/shufflehandler/FadvisedFileRegion.java |     6 +-
 .../hive/llap/shufflehandler/IndexCache.java    |     6 +-
 .../llap/shufflehandler/ShuffleHandler.java     |    19 +-
 .../tezplugins/helpers/SourceStateTracker.java  |     6 +-
 .../dag/app/rm/LlapTaskSchedulerService.java    |    12 +-
 .../hive/llap/cache/TestBuddyAllocator.java     |    12 +-
 .../TestIncrementalObjectSizeEstimator.java     |     6 +-
 .../hive/llap/cache/TestLowLevelCacheImpl.java  |     6 +-
 .../llap/cache/TestLowLevelLrfuCachePolicy.java |     6 +-
 .../hive/llap/cache/TestOrcMetadataCache.java   |     8 +-
 .../hive/llap/daemon/MiniLlapCluster.java       |     6 +-
 .../app/rm/TestLlapTaskSchedulerService.java    |     2 +-
 metastore/if/hive_metastore.thrift              |    14 +
 metastore/pom.xml                               |    13 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  3433 +++--
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |   334 +
 .../ThriftHiveMetastore_server.skeleton.cpp     |    10 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |   349 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |    65 +
 .../hadoop/hive/metastore/api/TableMeta.java    |   701 +
 .../hive/metastore/api/ThriftHiveMetastore.java | 13687 ++++++++++-------
 .../gen-php/metastore/ThriftHiveMetastore.php   |  1915 ++-
 .../src/gen/thrift/gen-php/metastore/Types.php  |   144 +
 .../hive_metastore/ThriftHiveMetastore-remote   |    14 +
 .../hive_metastore/ThriftHiveMetastore.py       |  1353 +-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |   110 +
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |    25 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   149 +
 .../hive/metastore/AggregateStatsCache.java     |    34 +-
 .../apache/hadoop/hive/metastore/Deadline.java  |     6 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java |    12 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |    56 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    54 +-
 .../hive/metastore/HiveMetaStoreFsImpl.java     |     8 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |    23 +
 .../hive/metastore/MetaStoreDirectSql.java      |     6 +-
 .../hadoop/hive/metastore/MetaStoreInit.java    |     6 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |    43 +-
 .../hadoop/hive/metastore/ObjectStore.java      |   124 +-
 .../hive/metastore/PartFilterExprUtil.java      |     6 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |     4 +
 .../hadoop/hive/metastore/RawStoreProxy.java    |     6 -
 .../hive/metastore/RetryingHMSHandler.java      |     6 +-
 .../hive/metastore/RetryingMetaStoreClient.java |     6 +-
 .../hive/metastore/TUGIBasedProcessor.java      |     6 +-
 .../apache/hadoop/hive/metastore/Warehouse.java |     6 +-
 .../hive/metastore/events/EventCleanerTask.java |     8 +-
 .../hbase/AggrStatsInvalidatorFilter.java       |     8 +-
 .../hive/metastore/hbase/HBaseImport.java       |     6 +-
 .../hive/metastore/hbase/HBaseReadWrite.java    |   704 +-
 .../hive/metastore/hbase/HBaseSchemaTool.java   |   282 +-
 .../hadoop/hive/metastore/hbase/HBaseStore.java |    50 +-
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |   109 +-
 .../metastore/hbase/PartitionKeyComparator.java |     6 +-
 .../hbase/SharedStorageDescriptor.java          |     6 +-
 .../hadoop/hive/metastore/hbase/StatsCache.java |     6 +-
 .../metastore/hbase/TephraHBaseConnection.java  |     6 +-
 .../metastore/hbase/VanillaHBaseConnection.java |     6 +-
 .../hadoop/hive/metastore/parser/Filter.g       |   218 +
 .../spec/CompositePartitionSpecProxy.java       |     4 +-
 .../hive/metastore/tools/HiveMetaTool.java      |     6 +-
 .../metastore/txn/CompactionTxnHandler.java     |     6 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |     6 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |     6 +-
 .../metastore/DummyMetaStoreInitListener.java   |     4 +-
 .../DummyRawStoreControlledCommit.java          |     7 +
 .../DummyRawStoreForJdoConnection.java          |     7 +
 .../hive/metastore/VerifyingObjectStore.java    |     6 +-
 .../hbase/TestHBaseAggregateStatsCache.java     |     6 +-
 .../hive/metastore/hbase/TestHBaseStore.java    |     6 +-
 .../metastore/hbase/TestHBaseStoreCached.java   |     6 +-
 .../hbase/TestSharedStorageDescriptor.java      |     6 +-
 .../metastore/txn/TestTxnHandlerNegative.java   |     6 +-
 pom.xml                                         |    26 +-
 ql/pom.xml                                      |    10 -
 .../org/apache/hadoop/hive/llap/LogLevels.java  |     4 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |    10 +-
 .../java/org/apache/hadoop/hive/ql/Driver.java  |    93 +-
 .../apache/hadoop/hive/ql/DriverContext.java    |     8 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |     6 -
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |     3 -
 .../hive/ql/exec/AbstractFileMergeOperator.java |     9 +-
 .../hadoop/hive/ql/exec/ArchiveUtils.java       |     6 +-
 .../hadoop/hive/ql/exec/AutoProgressor.java     |     6 +-
 .../hadoop/hive/ql/exec/ColumnStatsTask.java    |     6 +-
 .../hive/ql/exec/ColumnStatsUpdateTask.java     |    10 +-
 .../hadoop/hive/ql/exec/CommonJoinOperator.java |     6 +-
 .../hive/ql/exec/CommonMergeJoinOperator.java   |    14 +-
 .../apache/hadoop/hive/ql/exec/CopyTask.java    |     6 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |    52 +-
 .../hive/ql/exec/DefaultBucketMatcher.java      |     6 +-
 .../hadoop/hive/ql/exec/DemuxOperator.java      |     6 +-
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |     8 +-
 .../ql/exec/ExprNodeGenericFuncEvaluator.java   |     8 +-
 .../hadoop/hive/ql/exec/FetchOperator.java      |     7 +-
 .../apache/hadoop/hive/ql/exec/FetchTask.java   |     6 +-
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |     9 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   133 +-
 .../hadoop/hive/ql/exec/FunctionTask.java       |    10 +-
 .../hive/ql/exec/HashTableSinkOperator.java     |     8 +-
 .../apache/hadoop/hive/ql/exec/Heartbeater.java |     6 +-
 .../hive/ql/exec/HiveTotalOrderPartitioner.java |     6 +-
 .../hadoop/hive/ql/exec/JoinOperator.java       |     6 +-
 .../hadoop/hive/ql/exec/KeyWrapperFactory.java  |    20 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |    26 +-
 .../apache/hadoop/hive/ql/exec/MapOperator.java |    27 +
 .../hadoop/hive/ql/exec/MapredContext.java      |    10 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |     9 +-
 .../apache/hadoop/hive/ql/exec/MuxOperator.java |     6 +-
 .../hadoop/hive/ql/exec/ObjectCacheFactory.java |     6 +-
 .../apache/hadoop/hive/ql/exec/Operator.java    |    58 +-
 .../hadoop/hive/ql/exec/OperatorFactory.java    |     6 +-
 .../hadoop/hive/ql/exec/OperatorUtils.java      |    17 +-
 .../hive/ql/exec/OrcFileMergeOperator.java      |     6 +-
 .../hadoop/hive/ql/exec/PTFPartition.java       |     6 +-
 .../hive/ql/exec/PartitionKeySampler.java       |     6 +-
 .../hive/ql/exec/RCFileMergeOperator.java       |     6 +-
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |    10 +
 .../apache/hadoop/hive/ql/exec/Registry.java    |    18 +-
 .../hadoop/hive/ql/exec/SMBMapJoinOperator.java |     8 +-
 .../hadoop/hive/ql/exec/SkewJoinHandler.java    |     8 +-
 .../ql/exec/SparkHashTableSinkOperator.java     |     6 +-
 .../hadoop/hive/ql/exec/StatsNoJobTask.java     |     6 +-
 .../apache/hadoop/hive/ql/exec/StatsTask.java   |     6 +-
 .../org/apache/hadoop/hive/ql/exec/Task.java    |     6 +-
 .../hive/ql/exec/TezDummyStoreOperator.java     |     9 +
 .../apache/hadoop/hive/ql/exec/TopNHash.java    |    13 +-
 .../hadoop/hive/ql/exec/UDTFOperator.java       |     6 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |    26 +-
 .../hive/ql/exec/errors/TaskLogProcessor.java   |     6 +-
 .../mapjoin/MapJoinMemoryExhaustionHandler.java |     6 +-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |    26 +-
 .../hadoop/hive/ql/exec/mr/ExecMapper.java      |    18 +-
 .../hive/ql/exec/mr/ExecMapperContext.java      |     3 -
 .../hadoop/hive/ql/exec/mr/ExecReducer.java     |     8 +-
 .../hadoop/hive/ql/exec/mr/HashTableLoader.java |     6 +-
 .../hadoop/hive/ql/exec/mr/MapredLocalTask.java |     8 +-
 .../hadoop/hive/ql/exec/mr/ObjectCache.java     |    16 +-
 .../apache/hadoop/hive/ql/exec/mr/Throttle.java |     4 +-
 .../persistence/BytesBytesMultiHashMap.java     |     6 +-
 .../ql/exec/persistence/FlatRowContainer.java   |     6 +-
 .../ql/exec/persistence/HashMapWrapper.java     |     6 +-
 .../persistence/HybridHashTableContainer.java   |    69 +-
 .../ql/exec/persistence/KeyValueContainer.java  |     6 +-
 .../persistence/MapJoinBytesTableContainer.java |    11 +-
 .../hive/ql/exec/persistence/MapJoinKey.java    |     4 +-
 .../ql/exec/persistence/MapJoinKeyObject.java   |     6 +-
 .../ql/exec/persistence/ObjectContainer.java    |     6 +-
 .../hive/ql/exec/persistence/RowContainer.java  |     6 +-
 .../hive/ql/exec/spark/HashTableLoader.java     |     6 +-
 .../hive/ql/exec/spark/HiveKVResultCache.java   |     6 +-
 .../ql/exec/spark/HiveSparkClientFactory.java   |     6 +-
 .../hive/ql/exec/spark/KryoSerializer.java      |     6 +-
 .../ql/exec/spark/LocalHiveSparkClient.java     |    16 +-
 .../ql/exec/spark/RemoteHiveSparkClient.java    |     6 +-
 .../hive/ql/exec/spark/SmallTableCache.java     |     6 +-
 .../exec/spark/SparkDynamicPartitionPruner.java |     6 +-
 .../ql/exec/spark/SparkMapRecordHandler.java    |     9 +-
 .../exec/spark/SparkMergeFileRecordHandler.java |     6 +-
 .../hadoop/hive/ql/exec/spark/SparkPlan.java    |     8 +-
 .../hive/ql/exec/spark/SparkPlanGenerator.java  |     7 +-
 .../hive/ql/exec/spark/SparkRecordHandler.java  |    10 +-
 .../ql/exec/spark/SparkReduceRecordHandler.java |     8 +-
 .../hadoop/hive/ql/exec/spark/SparkTask.java    |     8 +-
 .../ql/exec/spark/session/SparkSessionImpl.java |     6 +-
 .../spark/session/SparkSessionManagerImpl.java  |     6 +-
 .../ql/exec/spark/status/SparkJobMonitor.java   |     8 +-
 .../spark/status/impl/JobMetricsListener.java   |     7 +-
 .../spark/status/impl/LocalSparkJobStatus.java  |     6 +-
 .../spark/status/impl/RemoteSparkJobStatus.java |     6 +-
 .../ql/exec/tez/ColumnarSplitSizeEstimator.java |     6 +-
 .../hive/ql/exec/tez/CustomPartitionEdge.java   |     6 +-
 .../hive/ql/exec/tez/CustomPartitionVertex.java |     7 +-
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |     6 +-
 .../ql/exec/tez/DynamicPartitionPruner.java     |     6 +-
 .../hive/ql/exec/tez/HashTableLoader.java       |     6 +-
 .../hive/ql/exec/tez/HivePreWarmProcessor.java  |     6 +-
 .../hive/ql/exec/tez/HiveSplitGenerator.java    |     6 +-
 .../hive/ql/exec/tez/LlapObjectCache.java       |    24 +-
 .../hive/ql/exec/tez/MapRecordProcessor.java    |    33 +-
 .../hive/ql/exec/tez/MapRecordSource.java       |     8 +-
 .../ql/exec/tez/MergeFileRecordProcessor.java   |    10 +-
 .../hadoop/hive/ql/exec/tez/ObjectCache.java    |     6 +-
 .../hive/ql/exec/tez/RecordProcessor.java       |     7 +-
 .../hive/ql/exec/tez/ReduceRecordProcessor.java |     6 +-
 .../hive/ql/exec/tez/ReduceRecordSource.java    |    26 +-
 .../hadoop/hive/ql/exec/tez/SplitGrouper.java   |     6 +-
 .../hive/ql/exec/tez/TezJobExecHelper.java      |     6 +-
 .../hadoop/hive/ql/exec/tez/TezProcessor.java   |    17 +-
 .../hive/ql/exec/tez/TezSessionPoolManager.java |     6 +-
 .../hive/ql/exec/tez/TezSessionState.java       |    10 +-
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |     9 +
 .../ql/exec/tez/tools/KeyValueInputMerger.java  |     6 +-
 .../ql/exec/tez/tools/KeyValuesInputMerger.java |     6 +-
 .../hive/ql/exec/vector/VectorAssignRow.java    |     8 +-
 .../ql/exec/vector/VectorColumnOrderedMap.java  |     6 +-
 .../hive/ql/exec/vector/VectorCopyRow.java      |     8 +-
 .../ql/exec/vector/VectorDeserializeRow.java    |    68 +-
 .../exec/vector/VectorExpressionDescriptor.java |     6 +-
 .../hive/ql/exec/vector/VectorExtractRow.java   |     8 +-
 .../ql/exec/vector/VectorGroupByOperator.java   |     8 +-
 .../exec/vector/VectorMapJoinBaseOperator.java  |     8 +-
 .../ql/exec/vector/VectorMapJoinOperator.java   |     6 +-
 .../exec/vector/VectorSMBMapJoinOperator.java   |     6 +-
 .../hive/ql/exec/vector/VectorSerializeRow.java |    68 +-
 .../exec/vector/VectorSerializeRowNoNulls.java  |   412 -
 .../ql/exec/vector/VectorizationContext.java    |    24 +-
 .../ql/exec/vector/VectorizedBatchUtil.java     |     6 +-
 .../ql/exec/vector/VectorizedRowBatchCtx.java   |     6 +-
 .../expressions/FilterStructColumnInList.java   |     3 +-
 .../ql/exec/vector/expressions/NullUtil.java    |    21 +-
 .../vector/expressions/StructColumnInList.java  |     3 +-
 .../vector/expressions/VectorUDFDateString.java |    10 +-
 .../exec/vector/keyseries/VectorKeySeries.java  |    98 +
 .../VectorKeySeriesBytesSerialized.java         |   271 +
 .../vector/keyseries/VectorKeySeriesImpl.java   |    68 +
 .../VectorKeySeriesLongSerialized.java          |   249 +
 .../VectorKeySeriesMultiSerialized.java         |   187 +
 .../keyseries/VectorKeySeriesSerialized.java    |    35 +
 .../VectorKeySeriesSerializedImpl.java          |   130 +
 .../keyseries/VectorKeySeriesSingleImpl.java    |   158 +
 .../mapjoin/VectorMapJoinCommonOperator.java    |    55 +-
 .../VectorMapJoinGenerateResultOperator.java    |    37 +-
 ...pJoinInnerBigOnlyGenerateResultOperator.java |     8 +-
 .../VectorMapJoinInnerBigOnlyLongOperator.java  |    14 +-
 ...ctorMapJoinInnerBigOnlyMultiKeyOperator.java |    38 +-
 ...VectorMapJoinInnerBigOnlyStringOperator.java |    14 +-
 ...ectorMapJoinInnerGenerateResultOperator.java |     8 +-
 .../mapjoin/VectorMapJoinInnerLongOperator.java |    14 +-
 .../VectorMapJoinInnerMultiKeyOperator.java     |    38 +-
 .../VectorMapJoinInnerStringOperator.java       |    14 +-
 ...orMapJoinLeftSemiGenerateResultOperator.java |     8 +-
 .../VectorMapJoinLeftSemiLongOperator.java      |    14 +-
 .../VectorMapJoinLeftSemiMultiKeyOperator.java  |    38 +-
 .../VectorMapJoinLeftSemiStringOperator.java    |    14 +-
 ...ectorMapJoinOuterGenerateResultOperator.java |    20 +-
 .../mapjoin/VectorMapJoinOuterLongOperator.java |    16 +-
 .../VectorMapJoinOuterMultiKeyOperator.java     |    21 +-
 .../VectorMapJoinOuterStringOperator.java       |    16 +-
 .../mapjoin/VectorMapJoinRowBytesContainer.java |     6 +-
 .../fast/VectorMapJoinFastBytesHashMap.java     |    11 +-
 .../VectorMapJoinFastBytesHashMultiSet.java     |    11 +-
 .../fast/VectorMapJoinFastBytesHashSet.java     |    11 +-
 .../fast/VectorMapJoinFastBytesHashTable.java   |    17 +-
 .../fast/VectorMapJoinFastBytesHashUtil.java    |     4 -
 .../fast/VectorMapJoinFastHashTable.java        |     8 +-
 .../fast/VectorMapJoinFastHashTableLoader.java  |     8 +-
 .../fast/VectorMapJoinFastIntHashUtil.java      |    32 -
 .../mapjoin/fast/VectorMapJoinFastKeyStore.java |     8 +-
 .../fast/VectorMapJoinFastLongHashMap.java      |    11 +-
 .../fast/VectorMapJoinFastLongHashMultiSet.java |    11 +-
 .../fast/VectorMapJoinFastLongHashSet.java      |    11 +-
 .../fast/VectorMapJoinFastLongHashTable.java    |    19 +-
 .../fast/VectorMapJoinFastLongHashUtil.java     |    11 -
 .../fast/VectorMapJoinFastTableContainer.java   |     8 +-
 .../fast/VectorMapJoinFastValueStore.java       |     8 +-
 .../VectorMapJoinOptimizedCreateHashTable.java  |     8 +-
 .../VectorMapJoinOptimizedHashTable.java        |     6 +-
 .../VectorMapJoinOptimizedLongCommon.java       |     8 +-
 .../VectorReduceSinkCommonOperator.java         |   416 +
 .../VectorReduceSinkLongOperator.java           |    72 +
 .../VectorReduceSinkMultiKeyOperator.java       |    68 +
 .../VectorReduceSinkStringOperator.java         |    70 +
 .../ql/exec/vector/udf/VectorUDFAdaptor.java    |     5 +
 .../ql/exec/vector/udf/VectorUDFArgDesc.java    |    19 +-
 .../hadoop/hive/ql/history/HiveHistoryImpl.java |     8 +-
 .../hive/ql/history/HiveHistoryViewer.java      |     6 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java    |    27 +-
 .../hadoop/hive/ql/hooks/LineageLogger.java     |    16 +-
 .../hive/ql/hooks/PostExecOrcFileDump.java      |     6 +-
 .../ql/hooks/PostExecTezSummaryPrinter.java     |     6 +-
 .../hadoop/hive/ql/hooks/WriteEntity.java       |     6 +-
 .../apache/hadoop/hive/ql/index/HiveIndex.java  |     6 +-
 .../hive/ql/index/HiveIndexQueryContext.java    |    10 +-
 .../hadoop/hive/ql/index/HiveIndexResult.java   |     8 +-
 .../hive/ql/index/HiveIndexedInputFormat.java   |     6 +-
 .../ql/index/bitmap/BitmapIndexHandler.java     |     6 +-
 .../ql/index/compact/CompactIndexHandler.java   |     6 +-
 .../compact/HiveCompactIndexInputFormat.java    |     8 +-
 .../hadoop/hive/ql/io/AcidInputFormat.java      |    14 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |     6 +-
 .../hive/ql/io/BucketizedHiveInputFormat.java   |     8 +-
 .../org/apache/hadoop/hive/ql/io/CodecPool.java |     6 +-
 .../hive/ql/io/CombineHiveInputFormat.java      |     6 +-
 .../org/apache/hadoop/hive/ql/io/HdfsUtils.java |     6 +-
 .../ql/io/HiveContextAwareRecordReader.java     |     8 +-
 .../hadoop/hive/ql/io/HiveFileFormatUtils.java  |     6 +-
 .../hadoop/hive/ql/io/HiveInputFormat.java      |    10 +-
 .../apache/hadoop/hive/ql/io/IOContextMap.java  |     6 +-
 .../hadoop/hive/ql/io/NullRowsInputFormat.java  |     6 +-
 .../org/apache/hadoop/hive/ql/io/RCFile.java    |     6 +-
 .../hadoop/hive/ql/io/StorageFormatFactory.java |     6 +-
 .../ql/io/avro/AvroContainerOutputFormat.java   |     6 +-
 .../ql/io/avro/AvroGenericRecordReader.java     |     6 +-
 .../hive/ql/io/merge/MergeFileMapper.java       |     6 +-
 .../hadoop/hive/ql/io/merge/MergeFileTask.java  |     2 +-
 .../hadoop/hive/ql/io/merge/MergeFileWork.java  |     6 +-
 .../apache/hadoop/hive/ql/io/orc/InStream.java  |     6 +-
 .../hadoop/hive/ql/io/orc/MemoryManager.java    |     6 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |    30 +-
 .../hive/ql/io/orc/OrcNewInputFormat.java       |     6 +-
 .../hadoop/hive/ql/io/orc/OrcOutputFormat.java  |     6 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java      |     6 +-
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |     6 +-
 .../apache/hadoop/hive/ql/io/orc/OrcSerde.java  |     6 +-
 .../apache/hadoop/hive/ql/io/orc/OrcSplit.java  |     6 +-
 .../apache/hadoop/hive/ql/io/orc/OrcUtils.java  |     6 +-
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |     8 +-
 .../hive/ql/io/orc/RecordReaderFactory.java     |     6 +-
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |    11 +-
 .../ql/io/orc/RunLengthIntegerReaderV2.java     |     6 +-
 .../hadoop/hive/ql/io/orc/WriterImpl.java       |     6 +-
 .../ql/io/orc/encoded/EncodedReaderImpl.java    |   115 +-
 .../hive/ql/io/parquet/LeafFilterFactory.java   |     6 +-
 .../ql/io/parquet/MapredParquetInputFormat.java |     6 +-
 .../io/parquet/MapredParquetOutputFormat.java   |     6 +-
 .../hive/ql/io/parquet/ProjectionPusher.java    |     6 +-
 .../parquet/VectorizedParquetInputFormat.java   |     8 +-
 .../read/ParquetFilterPredicateConverter.java   |     6 +-
 .../read/ParquetRecordReaderWrapper.java        |     6 +-
 .../ql/io/parquet/write/DataWritableWriter.java |     8 +-
 .../write/ParquetRecordWriterWrapper.java       |     6 +-
 .../ql/io/rcfile/stats/PartialScanMapper.java   |     6 +-
 .../ql/io/rcfile/stats/PartialScanTask.java     |     7 +-
 .../rcfile/truncate/ColumnTruncateMapper.java   |     6 +-
 .../io/rcfile/truncate/ColumnTruncateTask.java  |     5 +-
 .../hive/ql/io/sarg/ConvertAstToSearchArg.java  |     6 +-
 .../hadoop/hive/ql/lockmgr/DbLockManager.java   |     6 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |     6 +-
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java |     8 +-
 .../hive/ql/lockmgr/EmbeddedLockManager.java    |    19 +-
 .../zookeeper/CuratorFrameworkSingleton.java    |     6 +-
 .../zookeeper/ZooKeeperHiveLockManager.java     |    16 +-
 .../hadoop/hive/ql/metadata/DummyPartition.java |     8 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |    83 +-
 .../hive/ql/metadata/HiveMetaStoreChecker.java  |     6 +-
 .../hadoop/hive/ql/metadata/HiveUtils.java      |     6 +-
 .../hadoop/hive/ql/metadata/Partition.java      |    23 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java |    84 +-
 .../apache/hadoop/hive/ql/metadata/Table.java   |    44 +-
 .../formatting/JsonMetaDataFormatter.java       |     6 +-
 .../formatting/TextMetaDataFormatter.java       |     6 +-
 .../ql/optimizer/AbstractBucketJoinProc.java    |     4 -
 ...tionSizeBasedBigTableSelectorForAutoSMJ.java |     8 +-
 .../hive/ql/optimizer/BucketJoinProcCtx.java    |     8 +-
 .../ql/optimizer/BucketMapJoinOptimizer.java    |     6 +-
 .../BucketingSortingReduceSinkOptimizer.java    |     6 +
 .../hive/ql/optimizer/ColumnPrunerProcCtx.java  |     7 +
 .../ql/optimizer/ColumnPrunerProcFactory.java   |     6 +-
 .../hive/ql/optimizer/ConstantPropagate.java    |     6 +-
 .../ql/optimizer/ConstantPropagateProcCtx.java  |    21 +-
 .../optimizer/ConstantPropagateProcFactory.java |     6 +-
 .../hive/ql/optimizer/ConvertJoinMapJoin.java   |     6 +-
 .../DynamicPartitionPruningOptimization.java    |     6 +-
 .../hive/ql/optimizer/GenMRFileSink1.java       |     6 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |    17 +-
 .../hive/ql/optimizer/GlobalLimitOptimizer.java |     6 +-
 .../hive/ql/optimizer/GroupByOptimizer.java     |    14 +-
 .../ql/optimizer/IdentityProjectRemover.java    |     6 +-
 .../hadoop/hive/ql/optimizer/IndexUtils.java    |     6 +-
 .../hive/ql/optimizer/MapJoinProcessor.java     |     6 +-
 .../ql/optimizer/OperatorComparatorFactory.java |     8 +-
 .../hadoop/hive/ql/optimizer/Optimizer.java     |    29 +-
 .../ql/optimizer/PartitionColumnsSeparator.java |   525 +
 .../hive/ql/optimizer/PointLookupOptimizer.java |    96 +-
 .../hadoop/hive/ql/optimizer/PrunerUtils.java   |     8 -
 .../ql/optimizer/ReduceSinkMapJoinProc.java     |     6 +-
 .../optimizer/RemoveDynamicPruningBySize.java   |     6 +-
 .../hadoop/hive/ql/optimizer/SamplePruner.java  |     8 +-
 .../ql/optimizer/SetReducerParallelism.java     |     6 +-
 .../hive/ql/optimizer/SimpleFetchOptimizer.java |     6 +-
 .../hive/ql/optimizer/SkewJoinOptimizer.java    |     6 +-
 .../optimizer/SortedDynPartitionOptimizer.java  |     6 +-
 .../SortedMergeBucketMapJoinOptimizer.java      |     8 +-
 .../SparkRemoveDynamicPruningBySize.java        |     8 +-
 .../hive/ql/optimizer/StatsOptimizer.java       |    50 +-
 .../ql/optimizer/calcite/HiveCalciteUtil.java   |    15 +-
 .../ql/optimizer/calcite/HiveRelOptUtil.java    |     6 +-
 .../ql/optimizer/calcite/RelOptHiveTable.java   |     8 +-
 .../optimizer/calcite/cost/HiveCostModel.java   |     6 +-
 .../calcite/cost/HiveOnTezCostModel.java        |     6 +-
 .../calcite/reloperators/HiveTableScan.java     |     6 +-
 .../rules/HiveInsertExchange4JoinRule.java      |     8 +-
 .../calcite/rules/HiveJoinAddNotNullRule.java   |     8 +-
 .../calcite/rules/HiveJoinToMultiJoinRule.java  |     6 +-
 .../calcite/rules/HivePreFilteringRule.java     |    10 +-
 .../calcite/rules/HiveRelFieldTrimmer.java      |   143 +-
 .../optimizer/calcite/rules/PartitionPrune.java |     6 +-
 .../calcite/stats/HiveRelMdRowCount.java        |    20 +-
 .../calcite/translator/ASTConverter.java        |    40 +-
 .../calcite/translator/ExprNodeConverter.java   |    12 +-
 .../calcite/translator/HiveOpConverter.java     |    13 +-
 .../translator/HiveOpConverterPostProc.java     |     6 +-
 .../translator/PlanModifierForASTConv.java      |     9 +-
 .../calcite/translator/PlanModifierUtil.java    |     6 +-
 .../calcite/translator/RexNodeConverter.java    |    19 +-
 .../translator/SqlFunctionConverter.java        |     6 +-
 .../correlation/CorrelationOptimizer.java       |     6 +-
 .../QueryPlanTreeTransformation.java            |     6 +-
 .../ql/optimizer/index/RewriteCanApplyCtx.java  |     6 +-
 .../ql/optimizer/index/RewriteGBUsingIndex.java |     6 +-
 .../index/RewriteParseContextGenerator.java     |     6 +-
 .../RewriteQueryUsingAggregateIndexCtx.java     |     6 +-
 .../ql/optimizer/lineage/OpProcFactory.java     |    25 +-
 .../LBPartitionProcFactory.java                 |     6 +-
 .../ListBucketingPruner.java                    |     6 +-
 .../pcr/PartitionConditionRemover.java          |     8 +-
 .../ql/optimizer/pcr/PcrExprProcFactory.java    |    39 +-
 .../hive/ql/optimizer/pcr/PcrOpProcFactory.java |     8 +-
 .../optimizer/physical/CrossProductCheck.java   |     8 +-
 .../physical/GenSparkSkewJoinProcessor.java     |     6 +-
 .../hive/ql/optimizer/physical/LlapDecider.java |     8 +-
 .../physical/LocalMapJoinProcFactory.java       |     6 +-
 .../ql/optimizer/physical/MemoryDecider.java    |     6 +-
 .../physical/MetadataOnlyOptimizer.java         |     6 +-
 .../optimizer/physical/NullScanOptimizer.java   |     6 +-
 .../physical/NullScanTaskDispatcher.java        |    12 +-
 .../ql/optimizer/physical/SerializeFilter.java  |     6 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |   235 +-
 .../physical/index/IndexWhereProcCtx.java       |     6 +-
 .../physical/index/IndexWhereProcessor.java     |     6 +-
 .../hive/ql/optimizer/ppr/OpProcFactory.java    |     3 +-
 .../ppr/PartitionExpressionForMetastore.java    |     6 +-
 .../hive/ql/optimizer/ppr/PartitionPruner.java  |     6 +-
 .../spark/CombineEquivalentWorkResolver.java    |     6 +-
 .../spark/SetSparkReducerParallelism.java       |     6 +-
 .../optimizer/spark/SparkMapJoinOptimizer.java  |     6 +-
 .../spark/SparkReduceSinkMapJoinProc.java       |     6 +-
 .../stats/annotation/StatsRulesProcFactory.java |    10 +-
 .../ql/optimizer/unionproc/UnionProcessor.java  |     5 -
 .../hive/ql/parse/AppMasterEventProcessor.java  |     6 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |    13 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |     8 +-
 .../hive/ql/parse/ColumnAccessAnalyzer.java     |     6 +-
 .../ql/parse/ColumnStatsSemanticAnalyzer.java   |     8 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |    11 +-
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |     6 +-
 .../hadoop/hive/ql/parse/FileSinkProcessor.java |     8 +-
 .../hive/ql/parse/FunctionSemanticAnalyzer.java |    10 +-
 .../hadoop/hive/ql/parse/GenTezUtils.java       |     6 +-
 .../apache/hadoop/hive/ql/parse/GenTezWork.java |    10 +-
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |     5 -
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |     4 +
 .../hadoop/hive/ql/parse/InputSignature.java    |     6 +-
 .../hive/ql/parse/MacroSemanticAnalyzer.java    |     8 +-
 .../hadoop/hive/ql/parse/MapReduceCompiler.java |     6 +-
 .../hive/ql/parse/MetaDataExportListener.java   |     6 +-
 .../hadoop/hive/ql/parse/PTFTranslator.java     |     6 +-
 .../hadoop/hive/ql/parse/ParseDriver.java       |     6 +-
 .../hive/ql/parse/ProcessAnalyzeTable.java      |     6 +-
 .../org/apache/hadoop/hive/ql/parse/QB.java     |     6 +-
 .../org/apache/hadoop/hive/ql/parse/QBExpr.java |     6 +-
 .../apache/hadoop/hive/ql/parse/QBMetaData.java |     6 +-
 .../hadoop/hive/ql/parse/QBParseInfo.java       |     6 +-
 .../hadoop/hive/ql/parse/RowResolver.java       |     6 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |    80 +-
 .../hive/ql/parse/TableAccessAnalyzer.java      |     6 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |     6 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |     7 +-
 .../hadoop/hive/ql/parse/TypeCheckCtx.java      |     8 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |    11 +-
 .../hadoop/hive/ql/parse/UnionProcessor.java    |     6 +-
 .../hive/ql/parse/spark/GenSparkUtils.java      |     6 +-
 .../hive/ql/parse/spark/GenSparkWork.java       |     6 +-
 .../hive/ql/parse/spark/SparkCompiler.java      |     6 +-
 .../ql/parse/spark/SparkFileSinkProcessor.java  |     8 +-
 .../SparkPartitionPruningSinkOperator.java      |     6 +-
 .../parse/spark/SparkProcessAnalyzeTable.java   |     6 +-
 .../apache/hadoop/hive/ql/plan/BaseWork.java    |    10 +-
 .../ql/plan/ConditionalResolverCommonJoin.java  |     6 +-
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |     8 +-
 .../hive/ql/plan/ExprNodeGenericFuncDesc.java   |    15 +-
 .../apache/hadoop/hive/ql/plan/FilterDesc.java  |     9 -
 .../apache/hadoop/hive/ql/plan/MapJoinDesc.java |     4 +-
 .../org/apache/hadoop/hive/ql/plan/MapWork.java |     8 +-
 .../org/apache/hadoop/hive/ql/plan/PTFDesc.java |     6 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |    44 +-
 .../hadoop/hive/ql/plan/ReduceSinkDesc.java     |    23 +-
 .../apache/hadoop/hive/ql/plan/ReduceWork.java  |     8 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      |     7 +
 .../org/apache/hadoop/hive/ql/plan/TezWork.java |     6 +-
 .../hive/ql/plan/VectorReduceSinkDesc.java      |    64 +
 .../hive/ql/plan/VectorReduceSinkInfo.java      |   123 +
 .../hadoop/hive/ql/ppd/ExprWalkerInfo.java      |     6 +-
 .../hive/ql/ppd/ExprWalkerProcFactory.java      |    18 +-
 .../hadoop/hive/ql/ppd/OpProcFactory.java       |    24 +-
 .../hadoop/hive/ql/ppd/PredicatePushDown.java   |     6 +-
 .../hive/ql/ppd/SyntheticJoinPredicate.java     |     6 +-
 .../ql/processors/AddResourceProcessor.java     |     6 +-
 .../ql/processors/CommandProcessorFactory.java  |     6 +-
 .../hadoop/hive/ql/processors/CommandUtil.java  |     6 +-
 .../hive/ql/processors/CompileProcessor.java    |     6 +-
 .../hive/ql/processors/CryptoProcessor.java     |     6 +-
 .../ql/processors/DeleteResourceProcessor.java  |     6 +-
 .../hadoop/hive/ql/processors/DfsProcessor.java |     6 +-
 .../hive/ql/processors/ReloadProcessor.java     |     6 +-
 .../AuthorizationPreEventListener.java          |     6 +-
 .../HiveAuthorizationProviderBase.java          |     6 +-
 .../StorageBasedAuthorizationProvider.java      |     6 +-
 .../AuthorizationMetaStoreFilterHook.java       |    10 +-
 .../sqlstd/DummyHiveAuthorizationValidator.java |     6 +-
 .../plugin/sqlstd/Operation2Privilege.java      |    12 +-
 .../plugin/sqlstd/SQLAuthorizationUtils.java    |     6 +-
 .../sqlstd/SQLStdHiveAccessController.java      |     6 +-
 .../SQLStdHiveAuthorizationValidator.java       |     6 +-
 .../hive/ql/session/DependencyResolver.java     |     8 +-
 .../hadoop/hive/ql/session/OperationLog.java    |    12 +-
 .../hadoop/hive/ql/session/SessionState.java    |    20 +-
 .../hive/ql/stats/CounterStatsAggregator.java   |     6 +-
 .../ql/stats/CounterStatsAggregatorSpark.java   |     6 +-
 .../ql/stats/CounterStatsAggregatorTez.java     |     6 +-
 .../hive/ql/stats/CounterStatsPublisher.java    |     6 +-
 .../hadoop/hive/ql/stats/StatsFactory.java      |     6 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |    77 +-
 .../hive/ql/stats/fs/FSStatsAggregator.java     |    10 +-
 .../hive/ql/stats/fs/FSStatsPublisher.java      |    12 +-
 .../hive/ql/txn/AcidHouseKeeperService.java     |    10 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |     6 +-
 .../hive/ql/txn/compactor/CompactorMR.java      |     6 +-
 .../hive/ql/txn/compactor/CompactorThread.java  |     6 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |     9 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java    |     8 +-
 .../org/apache/hadoop/hive/ql/udf/UDFE.java     |     6 +-
 .../org/apache/hadoop/hive/ql/udf/UDFPI.java    |     6 +-
 .../hive/ql/udf/generic/GenericUDAFAverage.java |     6 +-
 .../ql/udf/generic/GenericUDAFComputeStats.java |    22 +-
 .../udf/generic/GenericUDAFContextNGrams.java   |     6 +-
 .../ql/udf/generic/GenericUDAFEWAHBitmap.java   |     6 +-
 .../ql/udf/generic/GenericUDAFFirstValue.java   |     6 +-
 .../generic/GenericUDAFHistogramNumeric.java    |     6 +-
 .../hive/ql/udf/generic/GenericUDAFLag.java     |     6 +-
 .../ql/udf/generic/GenericUDAFLastValue.java    |     6 +-
 .../hive/ql/udf/generic/GenericUDAFLead.java    |     6 +-
 .../hive/ql/udf/generic/GenericUDAFLeadLag.java |     6 +-
 .../hive/ql/udf/generic/GenericUDAFMax.java     |     6 +-
 .../hive/ql/udf/generic/GenericUDAFMin.java     |     6 +-
 .../hive/ql/udf/generic/GenericUDAFNTile.java   |     6 +-
 .../ql/udf/generic/GenericUDAFPercentRank.java  |     6 +-
 .../generic/GenericUDAFPercentileApprox.java    |     6 +-
 .../hive/ql/udf/generic/GenericUDAFRank.java    |     6 +-
 .../ql/udf/generic/GenericUDAFRowNumber.java    |     6 +-
 .../hive/ql/udf/generic/GenericUDAFSum.java     |     6 +-
 .../ql/udf/generic/GenericUDAFVariance.java     |     6 +-
 .../hive/ql/udf/generic/GenericUDAFnGrams.java  |     6 +-
 .../udf/generic/GenericUDFFromUtcTimestamp.java |     6 +-
 .../hive/ql/udf/generic/GenericUDFRegExp.java   |     8 +-
 .../ql/udf/generic/GenericUDFTimestamp.java     |     4 +-
 .../hive/ql/udf/generic/GenericUDFToChar.java   |     6 +-
 .../ql/udf/generic/GenericUDFToVarchar.java     |     6 +-
 .../ql/udf/generic/GenericUDFUnixTimeStamp.java |     6 +-
 .../ql/udf/generic/GenericUDTFJSONTuple.java    |     6 +-
 .../udf/generic/GenericUDTFParseUrlTuple.java   |     6 +-
 .../hive/ql/udf/generic/NGramEstimator.java     |     4 +-
 .../udf/generic/NumDistinctValueEstimator.java  |    14 +-
 .../hive/ql/udf/ptf/WindowingTableFunction.java |     6 +-
 .../hive/ql/util/ZooKeeperHiveHelper.java       |     6 +-
 .../hadoop/hive/ql/exec/TestExecDriver.java     |     6 +-
 .../hive/ql/exec/TestFileSinkOperator.java      |     6 +-
 .../hive/ql/exec/TestFunctionRegistry.java      |     2 +-
 .../hadoop/hive/ql/exec/TestUtilities.java      |     6 +-
 .../TestMapJoinMemoryExhaustionHandler.java     |     6 +-
 .../session/TestSparkSessionManagerImpl.java    |     6 +-
 .../hive/ql/exec/tez/TestTezSessionPool.java    |     6 +-
 .../hive/ql/exec/vector/TestVectorSerDeRow.java |    19 +-
 .../exec/vector/TestVectorizationContext.java   |     6 +-
 .../hive/ql/exec/vector/UDFHelloTest.java       |    69 +
 .../expressions/TestVectorDateExpressions.java  |    71 +-
 .../hadoop/hive/ql/io/TestAcidInputFormat.java  |    88 +
 .../apache/hadoop/hive/ql/io/TestRCFile.java    |     6 +-
 .../hive/ql/io/TestSymlinkTextInputFormat.java  |     8 +-
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java  |     6 +-
 .../hive/ql/lockmgr/TestDbTxnManager.java       |    13 +-
 .../hive/ql/lockmgr/TestDummyTxnManager.java    |    10 +-
 .../hadoop/hive/ql/log/TestLog4j2Appenders.java |     2 +-
 .../parse/TestUpdateDeleteSemanticAnalyzer.java |     6 +-
 .../hive/ql/session/TestSessionState.java       |     6 +-
 .../hive/ql/txn/compactor/CompactorTest.java    |     6 +-
 .../hive/ql/txn/compactor/TestCleaner.java      |     6 +-
 .../hive/ql/txn/compactor/TestInitiator.java    |     6 +-
 .../hive/ql/txn/compactor/TestWorker.java       |     6 +-
 .../clientnegative/authorization_import.q       |    39 +
 .../column_change_skewedcol_type1.q             |     2 -
 .../queries/clientnegative/column_rename5.q     |     2 -
 ...te_skewed_table_col_name_value_no_mismatch.q |     2 -
 .../create_skewed_table_dup_col_name.q          |     2 -
 ...eate_skewed_table_failure_invalid_col_name.q |     3 -
 .../disallow_incompatible_type_change_on1.q     |     6 +-
 .../clientnegative/drop_database_cascade.q      |    26 +
 .../queries/clientnegative/invalid_config1.q    |     3 -
 .../queries/clientnegative/invalid_config2.q    |     4 -
 .../clientnegative/load_stored_as_dirs.q        |     2 -
 .../set_hiveconf_internal_variable0.q           |     4 +
 .../set_hiveconf_internal_variable1.q           |     4 +
 .../truncate_column_list_bucketing.q            |     1 -
 .../clientpositive/add_jar_with_file_removed.q  |    15 +
 ql/src/test/queries/clientpositive/alter1.q     |     6 +-
 .../queries/clientpositive/alter_skewed_table.q |     2 -
 .../queries/clientpositive/avro_partitioned.q   |     3 +-
 .../cbo_rp_annotate_stats_groupby.q             |   141 +
 .../clientpositive/cbo_rp_unionDistinct_2.q     |   128 +
 .../test/queries/clientpositive/cbo_udf_max.q   |    36 +
 .../columnarserde_create_shortcut.q             |     2 +
 .../create_alter_list_bucketing_table1.q        |     2 -
 .../clientpositive/create_skewed_table1.q       |     1 -
 .../test/queries/clientpositive/explain_ddl.q   |    28 +
 .../test/queries/clientpositive/explainuser_3.q |    46 +-
 .../clientpositive/groupby_grouping_id3.q       |    22 +
 .../queries/clientpositive/groupby_sort_8.q     |     6 -
 .../clientpositive/groupby_sort_test_1.q        |     1 -
 .../infer_bucket_sort_list_bucket.q             |     3 +-
 ql/src/test/queries/clientpositive/input3.q     |    10 +-
 .../queries/clientpositive/insert_dir_distcp.q  |     9 +
 .../clientpositive/insert_values_nonascii.q     |     9 +
 .../clientpositive/insertoverwrite_bucket.q     |     9 +
 .../test/queries/clientpositive/lb_fs_stats.q   |     1 -
 ql/src/test/queries/clientpositive/lineage2.q   |    18 +
 ql/src/test/queries/clientpositive/lineage3.q   |     3 +-
 .../queries/clientpositive/list_bucket_dml_1.q  |     1 -
 .../queries/clientpositive/list_bucket_dml_10.q |     2 -
 .../queries/clientpositive/list_bucket_dml_11.q |     1 -
 .../queries/clientpositive/list_bucket_dml_12.q |     1 -
 .../queries/clientpositive/list_bucket_dml_13.q |     1 -
 .../queries/clientpositive/list_bucket_dml_14.q |     1 -
 .../queries/clientpositive/list_bucket_dml_2.q  |     1 -
 .../queries/clientpositive/list_bucket_dml_3.q  |     1 -
 .../queries/clientpositive/list_bucket_dml_4.q  |     1 -
 .../queries/clientpositive/list_bucket_dml_5.q  |     1 -
 .../queries/clientpositive/list_bucket_dml_6.q  |     1 -
 .../queries/clientpositive/list_bucket_dml_7.q  |     1 -
 .../queries/clientpositive/list_bucket_dml_8.q  |     1 -
 .../queries/clientpositive/list_bucket_dml_9.q  |     1 -
 .../list_bucket_query_multiskew_1.q             |     1 -
 .../list_bucket_query_multiskew_2.q             |     1 -
 .../list_bucket_query_multiskew_3.q             |     1 -
 .../list_bucket_query_oneskew_1.q               |     1 -
 .../list_bucket_query_oneskew_2.q               |     1 -
 .../list_bucket_query_oneskew_3.q               |     1 -
 .../queries/clientpositive/macro_duplicate.q    |     2 +-
 ql/src/test/queries/clientpositive/mrr.q        |     2 +
 .../queries/clientpositive/non_ascii_literal1.q |     1 +
 .../queries/clientpositive/non_ascii_literal2.q |     5 +
 .../clientpositive/orc_int_type_promotion.q     |     2 +
 .../clientpositive/parquet_schema_evolution.q   |     6 +-
 .../partition_wise_fileformat11.q               |     4 +-
 .../partition_wise_fileformat12.q               |     4 +-
 .../partition_wise_fileformat13.q               |     5 +-
 .../partition_wise_fileformat15.q               |     4 +-
 .../partition_wise_fileformat16.q               |     4 +-
 ql/src/test/queries/clientpositive/pcs.q        |    66 +
 .../test/queries/clientpositive/pointlookup.q   |     6 +-
 .../test/queries/clientpositive/pointlookup2.q  |     2 +-
 .../test/queries/clientpositive/pointlookup3.q  |     2 +-
 .../test/queries/clientpositive/pointlookup4.q  |    27 +
 .../test/queries/clientpositive/quotedid_skew.q |     1 -
 .../test/queries/clientpositive/recursive_dir.q |     1 -
 .../test/queries/clientpositive/rename_column.q |     4 +-
 .../queries/clientpositive/skewjoin_mapjoin1.q  |     1 -
 .../queries/clientpositive/skewjoin_mapjoin10.q |     1 -
 .../queries/clientpositive/skewjoin_mapjoin11.q |     1 -
 .../queries/clientpositive/skewjoin_mapjoin2.q  |     1 -
 .../queries/clientpositive/skewjoin_mapjoin3.q  |     1 -
 .../queries/clientpositive/skewjoin_mapjoin4.q  |     1 -
 .../queries/clientpositive/skewjoin_mapjoin5.q  |     1 -
 .../queries/clientpositive/skewjoin_mapjoin6.q  |     1 -
 .../queries/clientpositive/skewjoin_mapjoin7.q  |     1 -
 .../queries/clientpositive/skewjoin_mapjoin8.q  |     1 -
 .../queries/clientpositive/skewjoin_mapjoin9.q  |     1 -
 .../clientpositive/skewjoin_union_remove_1.q    |     1 -
 .../clientpositive/skewjoin_union_remove_2.q    |     1 -
 .../test/queries/clientpositive/skewjoinopt1.q  |     1 -
 .../test/queries/clientpositive/skewjoinopt10.q |     1 -
 .../test/queries/clientpositive/skewjoinopt11.q |     1 -
 .../test/queries/clientpositive/skewjoinopt12.q |     1 -
 .../test/queries/clientpositive/skewjoinopt13.q |     1 -
 .../test/queries/clientpositive/skewjoinopt14.q |     1 -
 .../test/queries/clientpositive/skewjoinopt15.q |     1 -
 .../test/queries/clientpositive/skewjoinopt16.q |     1 -
 .../test/queries/clientpositive/skewjoinopt17.q |     3 +-
 .../test/queries/clientpositive/skewjoinopt18.q |     1 -
 .../test/queries/clientpositive/skewjoinopt19.q |     1 -
 .../test/queries/clientpositive/skewjoinopt2.q  |     1 -
 .../test/queries/clientpositive/skewjoinopt20.q |     1 -
 .../test/queries/clientpositive/skewjoinopt3.q  |     1 -
 .../test/queries/clientpositive/skewjoinopt4.q  |     1 -
 .../test/queries/clientpositive/skewjoinopt5.q  |     1 -
 .../test/queries/clientpositive/skewjoinopt6.q  |     1 -
 .../test/queries/clientpositive/skewjoinopt7.q  |     1 -
 .../test/queries/clientpositive/skewjoinopt8.q  |     1 -
 .../test/queries/clientpositive/skewjoinopt9.q  |     1 -
 .../queries/clientpositive/stats_list_bucket.q  |     2 -
 .../queries/clientpositive/struct_in_view.q     |    28 +
 .../test/queries/clientpositive/tez_smb_empty.q |    55 +
 .../queries/clientpositive/tez_union_with_udf.q |    13 +
 .../truncate_column_list_bucket.q               |     1 -
 .../queries/clientpositive/union_remove_1.q     |     1 -
 .../queries/clientpositive/union_remove_10.q    |     1 -
 .../queries/clientpositive/union_remove_11.q    |     1 -
 .../queries/clientpositive/union_remove_12.q    |     1 -
 .../queries/clientpositive/union_remove_13.q    |     1 -
 .../queries/clientpositive/union_remove_14.q    |     1 -
 .../queries/clientpositive/union_remove_15.q    |     1 -
 .../queries/clientpositive/union_remove_16.q    |     1 -
 .../queries/clientpositive/union_remove_17.q    |     1 -
 .../queries/clientpositive/union_remove_18.q    |     1 -
 .../queries/clientpositive/union_remove_19.q    |     1 -
 .../queries/clientpositive/union_remove_2.q     |     1 -
 .../queries/clientpositive/union_remove_20.q    |     1 -
 .../queries/clientpositive/union_remove_21.q    |     1 -
 .../queries/clientpositive/union_remove_22.q    |     1 -
 .../queries/clientpositive/union_remove_23.q    |     1 -
 .../queries/clientpositive/union_remove_24.q    |     1 -
 .../queries/clientpositive/union_remove_25.q    |     1 -
 .../queries/clientpositive/union_remove_3.q     |     1 -
 .../queries/clientpositive/union_remove_4.q     |     1 -
 .../queries/clientpositive/union_remove_5.q     |     1 -
 .../queries/clientpositive/union_remove_6.q     |     1 -
 .../clientpositive/union_remove_6_subq.q        |     1 -
 .../queries/clientpositive/union_remove_7.q     |     1 -
 .../queries/clientpositive/union_remove_8.q     |     1 -
 .../queries/clientpositive/union_remove_9.q     |     1 -
 .../vector_custom_udf_configure.q               |    11 +
 .../queries/clientpositive/vector_reduce1.q     |    47 +
 .../queries/clientpositive/vector_reduce2.q     |    47 +
 .../queries/clientpositive/vector_reduce3.q     |    47 +
 .../queries/clientpositive/vectorized_case.q    |    19 +
 .../clientnegative/authorization_import.q.out   |    48 +
 .../authorization_uri_import.q.out              |    29 +
 .../disallow_incompatible_type_change_on1.q.out |     3 +-
 .../clientnegative/drop_database_cascade.q.out  |    85 +
 .../clientnegative/exchange_partition.q.out     |     2 +
 .../clientnegative/invalid_config1.q.out        |     2 -
 .../clientnegative/invalid_config2.q.out        |     2 -
 .../set_hiveconf_internal_variable0.q.out       |    11 +
 .../set_hiveconf_internal_variable1.q.out       |    11 +
 .../test/results/clientpositive/acid_join.q.out |     2 +-
 .../add_jar_with_file_removed.q.out             |    27 +
 .../alter_partition_change_col.q.out            |   240 +-
 .../clientpositive/alter_table_cascade.q.out    |    40 +-
 .../annotate_stats_deep_filters.q.out           |     2 +-
 .../clientpositive/annotate_stats_filter.q.out  |    48 +-
 .../clientpositive/annotate_stats_groupby.q.out |    56 +-
 .../annotate_stats_groupby2.q.out               |    32 +-
 .../clientpositive/annotate_stats_join.q.out    |    34 +-
 .../annotate_stats_join_pkfk.q.out              |    50 +-
 .../clientpositive/annotate_stats_limit.q.out   |     8 +-
 .../clientpositive/annotate_stats_part.q.out    |    14 +-
 .../clientpositive/annotate_stats_select.q.out  |    24 +-
 .../clientpositive/annotate_stats_table.q.out   |    12 +-
 .../clientpositive/annotate_stats_union.q.out   |    20 +-
 .../clientpositive/ansi_sql_arithmetic.q.out    |     2 +-
 .../clientpositive/auto_sortmerge_join_10.q.out |   100 +-
 .../results/clientpositive/avro_decimal.q.out   |    10 +-
 .../clientpositive/avro_decimal_native.q.out    |    10 +-
 .../results/clientpositive/bucket_groupby.q.out |    46 +-
 .../bucketizedhiveinputformat.q.out             |     2 +
 .../clientpositive/cast_qualified_types.q.out   |     2 +-
 .../cbo_rp_annotate_stats_groupby.q.out         |  1301 ++
 .../clientpositive/cbo_rp_auto_join0.q.out      |     8 +-
 .../clientpositive/cbo_rp_auto_join1.q.out      |    30 +-
 .../results/clientpositive/cbo_rp_join0.q.out   |    14 +-
 .../clientpositive/cbo_rp_lineage2.q.out        |    68 +-
 .../clientpositive/cbo_rp_unionDistinct_2.q.out |   545 +
 .../results/clientpositive/cbo_udf_max.q.out    |    62 +
 .../results/clientpositive/decimal_1_1.q.out    |    48 +-
 .../test/results/clientpositive/decimal_3.q.out |   514 +-
 .../test/results/clientpositive/decimal_4.q.out |   144 +-
 .../test/results/clientpositive/decimal_5.q.out |   180 +-
 .../test/results/clientpositive/decimal_6.q.out |    92 +-
 .../results/clientpositive/decimal_join2.q.out  |   260 +-
 .../clientpositive/decimal_precision.q.out      |   170 +-
 .../clientpositive/decimal_trailing.q.out       |    42 +-
 .../results/clientpositive/decimal_udf.q.out    |   960 +-
 .../dynpart_sort_optimization_acid.q.out        |     4 +-
 .../clientpositive/exchange_partition.q.out     |     6 +
 .../clientpositive/exchange_partition2.q.out    |     6 +
 .../clientpositive/exchange_partition3.q.out    |     8 +
 .../clientpositive/exchgpartition2lel.q.out     |    18 +
 .../clientpositive/exim_00_nonpart_empty.q.out  |     2 +
 .../clientpositive/exim_01_nonpart.q.out        |     2 +
 .../clientpositive/exim_02_00_part_empty.q.out  |     2 +
 .../results/clientpositive/exim_02_part.q.out   |     2 +
 .../clientpositive/exim_04_all_part.q.out       |     2 +
 .../clientpositive/exim_04_evolved_parts.q.out  |     2 +
 .../clientpositive/exim_05_some_part.q.out      |     2 +
 .../clientpositive/exim_06_one_part.q.out       |     2 +
 .../clientpositive/exim_08_nonpart_rename.q.out |     2 +
 .../exim_10_external_managed.q.out              |     2 +
 .../exim_11_managed_external.q.out              |     2 +
 .../exim_12_external_location.q.out             |     2 +
 .../exim_13_managed_location.q.out              |     2 +
 .../clientpositive/exim_18_part_external.q.out  |     2 +
 .../exim_19_00_part_external_location.q.out     |     2 +
 .../exim_19_part_external_location.q.out        |     2 +
 .../exim_20_part_managed_location.q.out         |     2 +
 .../exim_24_import_nonexist_authsuccess.q.out   |     2 +
 .../clientpositive/exim_hidden_files.q.out      |     2 +
 .../results/clientpositive/explain_ddl.q.out    |   604 +
 .../extrapolate_part_stats_full.q.out           |     8 +-
 .../extrapolate_part_stats_partial.q.out        |    12 +-
 .../extrapolate_part_stats_partial_ndv.q.out    |     6 +-
 .../clientpositive/groupby_grouping_id3.q.out   |    60 +
 .../results/clientpositive/groupby_sort_8.q.out |    64 -
 .../clientpositive/groupby_sort_test_1.q.out    |    87 +-
 .../clientpositive/import_exported_table.q.out  |     3 +
 .../clientpositive/infer_bucket_sort.q.out      |     4 +-
 .../clientpositive/insert_dir_distcp.q.out      |    14 +
 .../insert_nonacid_from_acid.q.out              |    20 +-
 .../clientpositive/insert_values_nonascii.q.out |    28 +
 .../clientpositive/insertoverwrite_bucket.q.out |    78 +
 .../test/results/clientpositive/lineage2.q.out  |    98 +-
 .../test/results/clientpositive/lineage3.q.out  |    60 +-
 .../clientpositive/llap/constprog_dpp.q.out     |    10 +-
 .../llap/dynamic_partition_pruning.q.out        |    45 -
 .../llap/hybridgrace_hashjoin_1.q.out           |   204 +-
 .../clientpositive/llap/llapdecider.q.out       |    46 +-
 .../clientpositive/llap/mapjoin_decimal.q.out   |   424 +-
 .../vectorized_dynamic_partition_pruning.q.out  |    45 -
 .../clientpositive/load_dyn_part15.q.out        |     6 +-
 .../clientpositive/macro_duplicate.q.out        |     4 +-
 .../multi_insert_lateral_view.q.out             |    36 +-
 .../clientpositive/non_ascii_literal1.q.out     |     9 +
 .../clientpositive/non_ascii_literal2.q.out     |    23 +
 .../results/clientpositive/orc_file_dump.q.out  |     6 +-
 .../clientpositive/orc_predicate_pushdown.q.out |     4 +-
 .../clientpositive/parquet_decimal.q.out        |    16 +-
 .../clientpositive/parquet_ppd_boolean.q.out    |   180 +-
 .../clientpositive/parquet_ppd_char.q.out       |   220 +-
 .../clientpositive/parquet_ppd_date.q.out       |   330 +-
 .../clientpositive/parquet_ppd_decimal.q.out    |   660 +-
 .../clientpositive/parquet_ppd_timestamp.q.out  |   320 +-
 .../clientpositive/parquet_ppd_varchar.q.out    |   220 +-
 .../parquet_predicate_pushdown.q.out            |     4 +-
 ql/src/test/results/clientpositive/pcs.q.out    |  2249 +++
 .../results/clientpositive/pointlookup.q.out    |     8 +-
 .../results/clientpositive/pointlookup4.q.out   |   530 +
 .../clientpositive/repl_2_exim_basic.q.out      |     4 +
 .../results/clientpositive/serde_regex.q.out    |    74 +-
 .../spark/annotate_stats_join.q.out             |    34 +-
 .../spark/auto_sortmerge_join_10.q.out          |    45 +-
 .../spark/avro_decimal_native.q.out             |    10 +-
 .../spark/bucketizedhiveinputformat.q.out       |     2 +
 .../clientpositive/spark/decimal_1_1.q.out      |    48 +-
 .../spark/import_exported_table.q.out           |     3 +
 .../clientpositive/spark/load_dyn_part15.q.out  |     6 +-
 .../clientpositive/spark/mapjoin_decimal.q.out  |   424 +-
 .../spark/multi_insert_lateral_view.q.out       |    36 +-
 .../spark/union_lateralview.q.out               |     4 +-
 .../spark/vector_between_in.q.out               |    14 +-
 .../spark/vector_cast_constant.q.java1.7.out    |    20 +-
 .../spark/vector_data_types.q.out               |     4 +-
 .../spark/vector_decimal_aggregate.q.out        |    32 +-
 .../spark/vector_decimal_mapjoin.q.out          |   212 +-
 .../clientpositive/spark/vectorized_case.q.out  |   109 +-
 .../results/clientpositive/stats_ppr_all.q.out  |     8 +-
 .../results/clientpositive/struct_in_view.q.out |   118 +
 .../clientpositive/sum_expr_with_order.q.out    |     2 +-
 .../tez/auto_sortmerge_join_10.q.out            |    71 +-
 .../tez/dynamic_partition_pruning.q.out         |    45 -
 .../clientpositive/tez/explainuser_1.q.out      |   368 +-
 .../clientpositive/tez/explainuser_2.q.out      |    38 +
 .../clientpositive/tez/explainuser_3.q.out      |   230 +-
 .../tez/hybridgrace_hashjoin_1.q.out            |   204 +-
 .../clientpositive/tez/insert_dir_distcp.q.out  |    14 +
 .../clientpositive/tez/llapdecider.q.out        |    46 +-
 .../clientpositive/tez/mapjoin_decimal.q.out    |   424 +-
 .../clientpositive/tez/tez_smb_empty.q.out      |   676 +
 .../clientpositive/tez/tez_union_with_udf.q.out |    36 +
 .../clientpositive/tez/update_all_types.q.out   |    30 +-
 .../clientpositive/tez/vector_aggregate_9.q.out |     2 +-
 .../tez/vector_aggregate_without_gby.q.out      |    85 +
 .../tez/vector_auto_smb_mapjoin_14.q.out        |    32 +-
 .../clientpositive/tez/vector_between_in.q.out  |    14 +-
 .../clientpositive/tez/vector_bround.q.out      |    66 +
 .../tez/vector_cast_constant.q.java1.7.out      |    20 +-
 .../clientpositive/tez/vector_data_types.q.out  |     4 +-
 .../clientpositive/tez/vector_decimal_2.q.out   |     4 +-
 .../clientpositive/tez/vector_decimal_3.q.out   |   514 +-
 .../clientpositive/tez/vector_decimal_4.q.out   |   288 +-
 .../clientpositive/tez/vector_decimal_5.q.out   |   180 +-
 .../clientpositive/tez/vector_decimal_6.q.out   |   172 +-
 .../tez/vector_decimal_aggregate.q.out          |    32 +-
 .../tez/vector_decimal_cast.q.out               |    20 +-
 .../tez/vector_decimal_expressions.q.out        |    20 +-
 .../tez/vector_decimal_mapjoin.q.out            |   212 +-
 .../tez/vector_decimal_precision.q.out          |   170 +-
 .../tez/vector_decimal_round_2.q.out            |    14 +-
 .../tez/vector_decimal_trailing.q.out           |    42 +-
 .../clientpositive/tez/vector_decimal_udf.q.out |   960 +-
 .../results/clientpositive/tez/vector_nvl.q.out |   194 +
 .../clientpositive/tez/vector_reduce1.q.out     |  2167 +++
 .../clientpositive/tez/vector_reduce2.q.out     |  2167 +++
 .../clientpositive/tez/vector_reduce3.q.out     |  2167 +++
 .../tez/vector_reduce_groupby_decimal.q.out     |    98 +-
 .../clientpositive/tez/vector_struct_in.q.out   |   645 +
 .../tez/vectorization_part_varchar.q.out        |    72 +
 .../clientpositive/tez/vectorized_case.q.out    |   109 +-
 .../tez/vectorized_distinct_gby.q.out           |     4 +-
 .../vectorized_dynamic_partition_pruning.q.out  |    45 -
 .../tez/vectorized_parquet_types.q.out          |   151 +-
 .../tez/vectorized_timestamp_ints_casts.q.out   |    50 +-
 .../clientpositive/union_lateralview.q.out      |     4 +-
 .../clientpositive/update_all_types.q.out       |    30 +-
 .../clientpositive/vector_aggregate_9.q.out     |     2 +-
 .../clientpositive/vector_between_in.q.out      |    14 +-
 .../vector_cast_constant.q.java1.7.out          |    20 +-
 .../vector_custom_udf_configure.q.out           |    70 +
 .../clientpositive/vector_data_types.q.out      |     4 +-
 .../clientpositive/vector_decimal_2.q.out       |     4 +-
 .../clientpositive/vector_decimal_3.q.out       |   514 +-
 .../clientpositive/vector_decimal_4.q.out       |   288 +-
 .../clientpositive/vector_decimal_5.q.out       |   180 +-
 .../clientpositive/vector_decimal_6.q.out       |   172 +-
 .../vector_decimal_aggregate.q.out              |    32 +-
 .../clientpositive/vector_decimal_cast.q.out    |    20 +-
 .../vector_decimal_expressions.q.out            |    20 +-
 .../clientpositive/vector_decimal_mapjoin.q.out |   212 +-
 .../vector_decimal_precision.q.out              |   170 +-
 .../clientpositive/vector_decimal_round_2.q.out |    14 +-
 .../vector_decimal_trailing.q.out               |    42 +-
 .../clientpositive/vector_decimal_udf.q.out     |   960 +-
 .../results/clientpositive/vector_reduce1.q.out |  2160 +++
 .../results/clientpositive/vector_reduce2.q.out |  2160 +++
 .../results/clientpositive/vector_reduce3.q.out |  2160 +++
 .../vector_reduce_groupby_decimal.q.out         |    98 +-
 .../clientpositive/vectorized_case.q.out        |    69 +
 .../vectorized_distinct_gby.q.out               |     4 +-
 .../clientpositive/windowing_decimal.q.out      |   104 +-
 .../clientpositive/windowing_navfn.q.out        |    20 +-
 .../results/clientpositive/windowing_rank.q.out |    60 +-
 .../clientpositive/windowing_windowspec3.q.out  |    18 +-
 serde/pom.xml                                   |     5 -
 .../hive/serde2/AbstractEncodingAwareSerDe.java |     6 +-
 .../hadoop/hive/serde2/AbstractSerDe.java       |     9 +
 .../hive/serde2/ColumnProjectionUtils.java      |    15 +-
 .../hadoop/hive/serde2/DelimitedJSONSerDe.java  |     6 +-
 .../serde2/MetadataTypedColumnsetSerDe.java     |     8 +-
 .../apache/hadoop/hive/serde2/OpenCSVSerde.java |     6 +-
 .../apache/hadoop/hive/serde2/RegexSerDe.java   |     6 +-
 .../apache/hadoop/hive/serde2/SerDeUtils.java   |     9 +-
 .../apache/hadoop/hive/serde2/WriteBuffers.java |    58 +-
 .../hive/serde2/avro/AvroDeserializer.java      |     6 +-
 .../serde2/avro/AvroLazyObjectInspector.java    |     8 +-
 .../hadoop/hive/serde2/avro/AvroSerDe.java      |    25 +-
 .../hadoop/hive/serde2/avro/AvroSerdeUtils.java |     6 +-
 .../hadoop/hive/serde2/avro/AvroSerializer.java |     4 +-
 .../hadoop/hive/serde2/avro/InstanceCache.java  |     6 +-
 .../binarysortable/BinarySortableSerDe.java     |     6 +-
 .../fast/BinarySortableDeserializeRead.java     |    33 +-
 .../fast/BinarySortableSerializeWrite.java      |    17 +-
 .../hive/serde2/columnar/ColumnarSerDe.java     |     8 +-
 .../hive/serde2/columnar/ColumnarStruct.java    |     6 +-
 .../hive/serde2/dynamic_type/DynamicSerDe.java  |     6 +-
 .../hive/serde2/fast/DeserializeRead.java       |     6 +-
 .../hadoop/hive/serde2/fast/SerializeWrite.java |     2 +-
 .../hadoop/hive/serde2/io/DateWritable.java     |     1 +
 .../serde2/io/HiveIntervalDayTimeWritable.java  |     6 +-
 .../io/HiveIntervalYearMonthWritable.java       |     6 +-
 .../hadoop/hive/serde2/lazy/LazyBinary.java     |     8 +-
 .../hadoop/hive/serde2/lazy/LazyDate.java       |     6 +-
 .../hadoop/hive/serde2/lazy/LazyDouble.java     |     6 +-
 .../hadoop/hive/serde2/lazy/LazyFloat.java      |     6 +-
 .../hadoop/hive/serde2/lazy/LazyHiveChar.java   |     6 +-
 .../hive/serde2/lazy/LazyHiveDecimal.java       |    10 +-
 .../hive/serde2/lazy/LazyHiveVarchar.java       |     6 +-
 .../apache/hadoop/hive/serde2/lazy/LazyMap.java |     6 +-
 .../hadoop/hive/serde2/lazy/LazyPrimitive.java  |     6 +-
 .../hive/serde2/lazy/LazySerDeParameters.java   |     6 +-
 .../hive/serde2/lazy/LazySimpleSerDe.java       |     5 -
 .../hadoop/hive/serde2/lazy/LazyStruct.java     |     6 +-
 .../hadoop/hive/serde2/lazy/LazyTimestamp.java  |    10 +-
 .../hadoop/hive/serde2/lazy/LazyUtils.java      |     3 +-
 .../lazy/fast/LazySimpleDeserializeRead.java    |   171 +-
 .../lazy/fast/LazySimpleSerializeWrite.java     |    24 +-
 .../LazyListObjectInspector.java                |     6 +-
 .../objectinspector/LazyMapObjectInspector.java |     6 +-
 .../LazyUnionObjectInspector.java               |     8 +-
 .../hive/serde2/lazybinary/LazyBinaryDate.java  |     6 +-
 .../LazyBinaryHiveIntervalDayTime.java          |     6 +-
 .../LazyBinaryHiveIntervalYearMonth.java        |     6 +-
 .../hive/serde2/lazybinary/LazyBinaryMap.java   |     6 +-
 .../hive/serde2/lazybinary/LazyBinarySerDe.java |     6 +-
 .../serde2/lazybinary/LazyBinaryStruct.java     |     6 +-
 .../serde2/lazybinary/LazyBinaryTimestamp.java  |     6 +-
 .../hive/serde2/lazybinary/LazyBinaryUnion.java |     6 +-
 .../hive/serde2/lazybinary/LazyBinaryUtils.java |     2 +-
 .../fast/LazyBinaryDeserializeRead.java         |    31 +-
 .../fast/LazyBinarySerializeWrite.java          |     8 +-
 .../objectinspector/ObjectInspectorUtils.java   |    32 +-
 .../StandardStructObjectInspector.java          |     8 +-
 .../PrimitiveObjectInspectorUtils.java          |     6 +-
 .../WritableHiveVarcharObjectInspector.java     |     6 +-
 .../serde2/thrift/TBinarySortableProtocol.java  |     6 +-
 .../serde2/thrift/TCTLSeparatedProtocol.java    |     6 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |    98 +-
 .../apache/hadoop/hive/serde2/VerifyFast.java   |     9 +-
 .../hive/serde2/avro/TestTypeInfoToSchema.java  |     7 +-
 .../binarysortable/TestBinarySortableFast.java  |     3 +-
 .../hive/serde2/lazy/TestLazySimpleFast.java    |     3 +-
 .../serde2/lazybinary/TestLazyBinaryFast.java   |     3 +-
 service/pom.xml                                 |     5 -
 .../apache/hive/service/AbstractService.java    |     6 +-
 .../apache/hive/service/CompositeService.java   |     6 +-
 .../org/apache/hive/service/CookieSigner.java   |     6 +-
 .../apache/hive/service/ServiceOperations.java  |     6 +-
 .../org/apache/hive/service/ServiceUtils.java   |    25 +
 .../hive/service/auth/HiveAuthFactory.java      |     8 +-
 .../apache/hive/service/auth/HttpAuthUtils.java |     6 +-
 .../auth/LdapAuthenticationProviderImpl.java    |     6 +-
 .../org/apache/hive/service/cli/CLIService.java |     8 +-
 .../cli/operation/GetTablesOperation.java       |    47 +-
 .../cli/operation/HiveCommandOperation.java     |    10 +-
 .../cli/operation/LogDivertAppender.java        |     7 +-
 .../cli/operation/MetadataOperation.java        |    23 +-
 .../hive/service/cli/operation/Operation.java   |     6 +-
 .../service/cli/operation/OperationManager.java |     9 +-
 .../service/cli/session/HiveSessionImpl.java    |    33 +-
 .../cli/session/HiveSessionImplwithUGI.java     |     6 +-
 .../service/cli/session/SessionManager.java     |     6 +-
 .../thrift/RetryingThriftCLIServiceClient.java  |     6 +-
 .../cli/thrift/ThriftBinaryCLIService.java      |     2 +-
 .../service/cli/thrift/ThriftCLIService.java    |     6 +-
 .../cli/thrift/ThriftHttpCLIService.java        |     3 +-
 .../service/cli/thrift/ThriftHttpServlet.java   |     6 +-
 .../apache/hive/service/server/HiveServer2.java |    17 +-
 .../server/ThreadWithGarbageCleanup.java        |     6 +-
 .../apache/hive/service/cli/CLIServiceTest.java |     6 +-
 shims/0.23/pom.xml                              |     5 -
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |     3 +
 .../apache/hadoop/mapred/WebHCatJTShim23.java   |    10 +-
 shims/common/pom.xml                            |    16 +-
 .../org/apache/hadoop/fs/DefaultFileAccess.java |     6 +-
 .../apache/hadoop/hive/shims/HadoopShims.java   |    18 +-
 .../hadoop/hive/shims/HadoopShimsSecure.java    |     6 +-
 .../apache/hadoop/hive/thrift/DBTokenStore.java |     7 +-
 .../hive/thrift/HadoopThriftAuthBridge.java     |    12 +-
 .../hadoop/hive/thrift/ZooKeeperTokenStore.java |     2 +-
 shims/scheduler/pom.xml                         |     5 -
 .../hadoop/hive/schshim/FairSchedulerShim.java  |     6 +-
 .../hive/spark/client/SparkClientUtilities.java |     7 +-
 .../hive/spark/counter/SparkCounters.java       |     6 +-
 .../hadoop/hive/common/io/DiskRangeList.java    |     6 +-
 .../hadoop/hive/common/type/HiveDecimal.java    |    11 +
 .../hive/ql/exec/vector/ColumnVector.java       |     3 +-
 .../ql/exec/vector/DecimalColumnVector.java     |     2 -
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |     5 -
 .../hive/serde2/io/HiveDecimalWritable.java     |     4 -
 testutils/ptest2/pom.xml                        |     5 -
 1287 files changed, 57004 insertions(+), 21783 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
index 2ab9c2d,6951993..6abef4e
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
@@@ -24,9 -23,8 +24,10 @@@ import java.util.List
  import java.util.Map;
  import java.util.Set;
  
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
++
 +import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.common.JavaUtils;
  import org.apache.hadoop.hive.ql.io.merge.MergeFileMapper;
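
The SparkPlanGenerator hunk above shows the mechanical part of this patch: classes that declared a commons-logging Log via LogFactory now declare an SLF4J Logger via LoggerFactory. A minimal sketch of the before/after pattern follows; the class name LoggingMigrationSketch is hypothetical, not a file in this commit, and only the logger declaration mirrors the diff.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only. Before this patch the field would have read:
    //   private static final Log LOG = LogFactory.getLog(LoggingMigrationSketch.class);
    public class LoggingMigrationSketch {

      // After: the SLF4J factory returns a Logger bound to the same class name.
      private static final Logger LOG = LoggerFactory.getLogger(LoggingMigrationSketch.class);

      public static void main(String[] args) {
        // SLF4J supports parameterized messages, so arguments are only
        // formatted when the log level is actually enabled.
        LOG.info("Generated Spark plan in {} ms", 42);
      }
    }

Beyond the one-line declaration change, the parameterized-message style is the practical gain of the new API: it avoids eager string concatenation on disabled log levels.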

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
index 52f4b9c,84603d5..09c54c1
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
@@@ -21,19 -21,33 +21,20 @@@ import java.util.Iterator
  import java.util.List;
  import java.util.Map;
  
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
++
 +import org.apache.spark.JavaSparkListener;
  import org.apache.spark.executor.TaskMetrics;
 -import org.apache.spark.scheduler.SparkListener;
 -import org.apache.spark.scheduler.SparkListenerApplicationEnd;
 -import org.apache.spark.scheduler.SparkListenerApplicationStart;
 -import org.apache.spark.scheduler.SparkListenerBlockManagerAdded;
 -import org.apache.spark.scheduler.SparkListenerBlockManagerRemoved;
 -import org.apache.spark.scheduler.SparkListenerEnvironmentUpdate;
 -import org.apache.spark.scheduler.SparkListenerExecutorMetricsUpdate;
 -import org.apache.spark.scheduler.SparkListenerJobEnd;
  import org.apache.spark.scheduler.SparkListenerJobStart;
 -import org.apache.spark.scheduler.SparkListenerStageCompleted;
 -import org.apache.spark.scheduler.SparkListenerStageSubmitted;
  import org.apache.spark.scheduler.SparkListenerTaskEnd;
 -import org.apache.spark.scheduler.SparkListenerTaskGettingResult;
 -import org.apache.spark.scheduler.SparkListenerTaskStart;
 -import org.apache.spark.scheduler.SparkListenerUnpersistRDD;
 -import org.apache.spark.scheduler.SparkListenerExecutorRemoved;
 -import org.apache.spark.scheduler.SparkListenerExecutorAdded;
  
  import com.google.common.collect.Lists;
  import com.google.common.collect.Maps;
  
 -public class JobMetricsListener implements SparkListener {
 +public class JobMetricsListener extends JavaSparkListener {
  
-   private static final Log LOG = LogFactory.getLog(JobMetricsListener.class);
+   private static final Logger LOG = LoggerFactory.getLogger(JobMetricsListener.class);
  
    private final Map<Integer, int[]> jobIdToStageId = Maps.newHashMap();
    private final Map<Integer, Integer> stageIdToJobId = Maps.newHashMap();
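
The two hunks above apply the same mechanical migration: commons-logging's Log/LogFactory is replaced with SLF4J's Logger/LoggerFactory, and the listener now extends JavaSparkListener, whose no-op default callbacks are what allow the long block of SparkListener* imports to be deleted. A minimal sketch of the resulting shape (class name and method body invented for illustration):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import org.apache.spark.JavaSparkListener;
    import org.apache.spark.scheduler.SparkListenerJobStart;

    public class ExampleListener extends JavaSparkListener {

      // SLF4J logger: parameterized messages ("{}") avoid string
      // concatenation when the log level is disabled.
      private static final Logger LOG = LoggerFactory.getLogger(ExampleListener.class);

      @Override
      public void onJobStart(SparkListenerJobStart jobStart) {
        // Only the events of interest are overridden; JavaSparkListener
        // supplies empty implementations for every other callback.
        LOG.info("Job {} started", jobStart.jobId());
      }
    }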

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkWork.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
----------------------------------------------------------------------
diff --cc spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
index bbbd97b,cd38346..b779f3f
--- a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
@@@ -24,20 -24,18 +24,21 @@@ import java.io.File
  import java.net.URL;
  import java.net.URLClassLoader;
  import java.util.List;
 -import java.util.Set;
 +import java.util.Map;
 +import java.util.concurrent.ConcurrentHashMap;
  
  import org.apache.commons.lang.StringUtils;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  
  public class SparkClientUtilities {
-   protected static final transient Log LOG = LogFactory.getLog(SparkClientUtilities.class);
+   protected static final transient Logger LOG = LoggerFactory.getLogger(SparkClientUtilities.class);
+ 
 +  private static final Map<String, Long> downloadedFiles = new ConcurrentHashMap<>();
 +
    /**
     * Add new elements to the classpath.
     *
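
The new downloadedFiles map suggests addToClassPath can now skip jars this JVM has already fetched. A hedged sketch of that caching pattern (the helper name and the timestamp comparison are assumptions, not the actual Hive logic):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class DownloadCache {
      // remote URI -> time (millis) the local copy was fetched
      private static final Map<String, Long> downloadedFiles = new ConcurrentHashMap<>();

      // True if 'uri' (last modified remotely at 'remoteTs') must be fetched;
      // records the fetch time when a download is needed.
      static boolean needsDownload(String uri, long remoteTs) {
        Long fetchedAt = downloadedFiles.get(uri);
        if (fetchedAt != null && fetchedAt >= remoteTs) {
          return false;  // an up-to-date local copy already exists
        }
        downloadedFiles.put(uri, System.currentTimeMillis());
        return true;
      }
    }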


[26/55] [abbrv] hive git commit: HIVE-12344: Wrong types inferred for SemiJoin generation in CBO (Jesus Camacho Rodriguez, reviewed by Laljo John Pullokkaran)

Posted by xu...@apache.org.
HIVE-12344: Wrong types inferred for SemiJoin generation in CBO (Jesus Camacho Rodriguez, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1305ea94
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1305ea94
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1305ea94

Branch: refs/heads/spark
Commit: 1305ea94621ce04732ef5203802018ee8d7c1640
Parents: 2ae1c5c
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Nov 6 17:27:56 2015 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Nov 6 17:27:56 2015 +0100

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1305ea94/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index e2f1cfb..90c2067 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -234,8 +234,8 @@ public class HiveCalciteUtil {
       leftKeys.add(origLeftInputSize + i);
       rightKeys.add(origRightInputSize + i);
       RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
-          rexBuilder.makeInputRef(newLeftFields.get(i).getType(), newLeftOffset + i),
-          rexBuilder.makeInputRef(newLeftFields.get(i).getType(), newRightOffset + i));
+          rexBuilder.makeInputRef(newLeftFields.get(origLeftInputSize + i).getType(), newLeftOffset + i),
+          rexBuilder.makeInputRef(newRightFields.get(origRightInputSize + i).getType(), newRightOffset + i));
       if (outJoinCond == null) {
         outJoinCond = cond;
       } else {
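
The defect the hunk fixes: both operands of the EQUALS call were typed from newLeftFields.get(i), i.e. the left input's field at the pre-append index, so whenever the semijoin key types differed between the two sides (say INT on the left, BIGINT on the right) the right-hand input ref carried the wrong type. A self-contained Calcite illustration of why each input ref must be typed from the field it actually references (plain Calcite, not Hive code):

    import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rel.type.RelDataTypeFactory;
    import org.apache.calcite.rex.RexBuilder;
    import org.apache.calcite.rex.RexNode;
    import org.apache.calcite.sql.fun.SqlStdOperatorTable;
    import org.apache.calcite.sql.type.SqlTypeName;

    public class InputRefTypes {
      public static void main(String[] args) {
        RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
        RexBuilder rexBuilder = new RexBuilder(typeFactory);

        RelDataType intType = typeFactory.createSqlType(SqlTypeName.INTEGER);
        RelDataType bigintType = typeFactory.createSqlType(SqlTypeName.BIGINT);

        // Field #0 (left key) is INT, field #1 (right key) is BIGINT.
        RexNode leftRef = rexBuilder.makeInputRef(intType, 0);
        // The old code effectively built this ref with intType, mislabeling
        // the BIGINT field and corrupting type inference downstream.
        RexNode rightRef = rexBuilder.makeInputRef(bigintType, 1);

        RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, leftRef, rightRef);
        System.out.println(cond);  // =($0, $1), each ref typed from its own field
      }
    }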


[49/55] [abbrv] hive git commit: HIVE-12309 : TableScan should use colStats when available for better data size estimate (Ashutosh Chauhan via Prasanth J)

Posted by xu...@apache.org.
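
The plan diffs below all move the TableScan's "Data size" off the raw-file estimate and onto one derived from column statistics: with complete column stats, the size is effectively the row count times the average serialized size of the referenced columns, which is why a 10-row scan over a single int key now reports 40 bytes rather than the 70 inferred from the file, while scans touching wide string columns grow instead. A hedged sketch of that arithmetic (illustrative only, not the Hive implementation):

    // rowCount = 10, one 4-byte int column            -> 10 * 4    = 40 bytes
    // rowCount = 500, strings averaging 178 bytes/row -> 500 * 178 = 89000 bytes
    static long estimateDataSize(long rowCount, long[] avgColSizes) {
      long bytesPerRow = 0;
      for (long size : avgColSizes) {
        bytesPerRow += size;
      }
      return rowCount * bytesPerRow;
    }
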
http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
index 3b053fe..6537a8a 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
@@ -96,7 +96,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
@@ -184,7 +184,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq2:subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
@@ -327,7 +327,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src2:subq2:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
@@ -406,7 +406,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src1:subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
@@ -525,7 +525,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 6) (type: boolean)
               Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
@@ -622,7 +622,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq2:subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
@@ -743,7 +743,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq2:subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
@@ -854,7 +854,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 8) (type: boolean)
               Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
@@ -942,7 +942,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key + 1) is not null (type: boolean)
               Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -957,7 +957,7 @@ STAGE PLANS:
                   Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: subq2:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key + 1) is not null (type: boolean)
               Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1063,7 +1063,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 6) (type: boolean)
               Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1152,7 +1152,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 6) (type: boolean)
               Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1259,7 +1259,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a:subq2:subq1:a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1348,7 +1348,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1429,7 +1429,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/cbo_rp_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_join0.q.out b/ql/src/test/results/clientpositive/cbo_rp_join0.q.out
index a8bcc90..3c6bb73 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_join0.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_join0.q.out
@@ -20,7 +20,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: cbo_t1
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
@@ -36,7 +36,7 @@ STAGE PLANS:
                   value expressions: c_int (type: int)
           TableScan
             alias: cbo_t2:cbo_t2
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
@@ -52,7 +52,7 @@ STAGE PLANS:
                   value expressions: c_int (type: int)
           TableScan
             alias: cbo_t3:cbo_t3
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string)
               outputColumnNames: key
@@ -669,7 +669,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: cbo_t1
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
@@ -685,7 +685,7 @@ STAGE PLANS:
                   value expressions: c_int (type: int)
           TableScan
             alias: cbo_t2:cbo_t2
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
@@ -701,7 +701,7 @@ STAGE PLANS:
                   value expressions: c_int (type: int)
           TableScan
             alias: cbo_t3:cbo_t3
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string)
               outputColumnNames: key
@@ -713,7 +713,7 @@ STAGE PLANS:
                 Statistics: Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: cbo_t4:cbo_t1
-            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
index f87a539..473ee0e 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
@@ -204,7 +204,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_1d
-          Statistics: Num rows: 6 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: state (type: string)
@@ -337,7 +337,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_1d
-          Statistics: Num rows: 6 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int)
@@ -626,7 +626,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_2d
-          Statistics: Num rows: 6 Data size: 532 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: state (type: string)
@@ -845,7 +845,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_2d
-          Statistics: Num rows: 6 Data size: 532 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
index 5903cd1..6c1fc13 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
@@ -307,7 +307,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_1d
-          Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 20 Data size: 1780 Basic stats: COMPLETE Column stats: PARTIAL
           GatherStats: false
           Select Operator
             expressions: state (type: string)
@@ -526,7 +526,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_1d
-          Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 20 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
           GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int)
@@ -758,7 +758,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_1d
-          Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: state (type: string)
@@ -973,7 +973,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_1d
-          Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: PARTIAL
           GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int)
@@ -1571,7 +1571,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_2d
-          Statistics: Num rows: 20 Data size: 1788 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 20 Data size: 1760 Basic stats: COMPLETE Column stats: PARTIAL
           GatherStats: false
           Select Operator
             expressions: state (type: string)
@@ -2098,7 +2098,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_2d
-          Statistics: Num rows: 20 Data size: 1788 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 20 Data size: 1840 Basic stats: COMPLETE Column stats: PARTIAL
           GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out
index 2ea1e6e..975dd50 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out
@@ -384,7 +384,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_1d
-          Statistics: Num rows: 20 Data size: 4186 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 20 Data size: 4260 Basic stats: COMPLETE Column stats: PARTIAL
           GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: double), cnt (type: decimal(10,0)), zip (type: int)
@@ -697,7 +697,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_1d
-          Statistics: Num rows: 20 Data size: 4186 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 20 Data size: 4220 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: double), cnt (type: decimal(10,0)), zip (type: int)
@@ -1375,7 +1375,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc_2d
-          Statistics: Num rows: 20 Data size: 4028 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 20 Data size: 4160 Basic stats: COMPLETE Column stats: PARTIAL
           GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int), cnt (type: decimal(10,0)), zip (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/llap/llapdecider.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llapdecider.q.out b/ql/src/test/results/clientpositive/llap/llapdecider.q.out
index 676a0e4..fd33181 100644
--- a/ql/src/test/results/clientpositive/llap/llapdecider.q.out
+++ b/ql/src/test/results/clientpositive/llap/llapdecider.q.out
@@ -20,11 +20,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count(value)
                       keys: key (type: string)
@@ -251,11 +251,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count(value)
                       keys: key (type: string)
@@ -324,7 +324,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -343,7 +343,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -412,7 +412,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -431,7 +431,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -503,7 +503,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -522,7 +522,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -593,7 +593,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -611,7 +611,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -680,7 +680,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -698,7 +698,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -767,7 +767,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -786,7 +786,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -856,7 +856,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -874,7 +874,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -943,7 +943,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -962,7 +962,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1039,7 +1039,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (UDFToInteger(key) > 1) (type: boolean)
                     Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1098,7 +1098,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (UDFToInteger(key) > 1) (type: boolean)
                     Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1155,7 +1155,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src_orc
-                  Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (UDFToInteger(GenericUDFTestGetJavaString(key)) > 1) (type: boolean)
                     Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
index 8955a61..9cbc411 100644
--- a/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
@@ -169,7 +169,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: e
-                  Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: deptid is not null (type: boolean)
                     Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -187,7 +187,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: d
-                  Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: deptid is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -248,7 +248,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: emp
-                  Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (deptid is not null and lastname is not null) (type: boolean)
                     Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -266,7 +266,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: dept
-                  Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (deptid is not null and deptname is not null) (type: boolean)
                     Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -322,7 +322,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: e
-                  Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (deptid is not null and lastname is not null) (type: boolean)
                     Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -340,7 +340,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: d
-                  Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (deptid is not null and deptname is not null) (type: boolean)
                     Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -400,7 +400,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: emp
-                  Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (deptid is not null and lastname is not null) (type: boolean)
                     Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -418,7 +418,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: dept
-                  Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (deptid is not null and deptname is not null) (type: boolean)
                     Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -478,7 +478,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: e
-                  Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: deptid is not null (type: boolean)
                     Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -496,7 +496,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: d
-                  Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: deptid is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -514,7 +514,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: e
-                  Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: deptid is not null (type: boolean)
                     Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -575,7 +575,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: e
-                  Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: deptid is not null (type: boolean)
                     Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -593,7 +593,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: d
-                  Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: deptid is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -611,7 +611,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: l
-                  Statistics: Num rows: 8 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: locid is not null (type: boolean)
                     Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
@@ -674,7 +674,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: e
-                  Statistics: Num rows: 48 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (deptid is not null and lastname is not null) (type: boolean)
                     Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
@@ -692,7 +692,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: d
-                  Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (deptid is not null and deptname is not null) (type: boolean)
                     Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
@@ -709,7 +709,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: l
-                  Statistics: Num rows: 8 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (locid is not null and state is not null) (type: boolean)
                     Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/stats_ppr_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_ppr_all.q.out b/ql/src/test/results/clientpositive/stats_ppr_all.q.out
index 7627f7a..c63c5b7 100644
--- a/ql/src/test/results/clientpositive/stats_ppr_all.q.out
+++ b/ql/src/test/results/clientpositive/stats_ppr_all.q.out
@@ -74,11 +74,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: order_amount (type: float)
               outputColumnNames: order_amount
-              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 aggregations: sum(order_amount)
                 mode: hash
@@ -173,11 +173,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: ss
-            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: order_amount (type: float)
               outputColumnNames: order_amount
-              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 aggregations: sum(order_amount)
                 mode: hash


[08/55] [abbrv] hive git commit: HIVE-12317: Emit current database in lineage info (Jimmy, reviewed by Yongzhi)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/92620d8e/ql/src/test/results/clientpositive/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
index ad965c8..fb5e9df 100644
--- a/ql/src/test/results/clientpositive/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/lineage3.q.out
@@ -10,7 +10,7 @@ insert into table d1 select x + length(y)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@d1
-{"version":"1.0","engine":"mr","hash":"4c9b7b8d89403cef78668f15d393e542","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x + length(y)","edges":[{"sources":[1,2],"targets":[0],"expression":"(UDFToInteger(a.ctinyint) + length(a.cstring1))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"4c9b7b8d89403cef78668f15d393e542","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x + length(y)","edges":[{"sources":[1,2],"targets":[0],"expression":"(UDFToInteger(a.ctinyint) + length(a.cstring1))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 PREHOOK: query: drop table if exists d2
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table d2(b varchar(128))
@@ -25,7 +25,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@d1
 PREHOOK: Output: default@d2
-{"version":"1.0","engine":"mr","hash":"8703e4091ebd4c96afd3cac83e3a2957","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[5],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[1],"expression":"(t.x > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint
 "},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"8703e4091ebd4c96afd3cac83e3a2957","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[5],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[1],"expression":"(t.x > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"def
 ault.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
 PREHOOK: query: drop table if exists t
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table t as
@@ -36,7 +36,7 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t
-{"version":"1.0","engine":"mr","hash":"761b3a1f405d8e719d3f0c9147b57a23","queryText":"create table t as\nselect * from\n  (select * from\n     (select key from src1 limit 1) v1) v2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"761b3a1f405d8e719d3f0c9147b57a23","queryText":"create table t as\nselect * from\n  (select * from\n     (select key from src1 limit 1) v1) v2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"}]}
 PREHOOK: query: drop table if exists dest_l1
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table dest_l1(a int, b varchar(128))
@@ -51,7 +51,7 @@ where cint is not null and cint < 0 order by cint, cs limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@dest_l1@ds=today
-{"version":"1.0","engine":"mr","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
 PREHOOK: query: insert into table dest_l1 partition (ds='tomorrow')
 select min(cint), cast(min(cstring1) as varchar(128)) as cs
 from alltypesorc
@@ -61,13 +61,13 @@ having min(cbigint) > 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@dest_l1@ds=tomorrow
-{"version":"1.0","engine":"mr","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},
 {"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"defaul
 t.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 PREHOOK: query: select cint, rank() over(order by cint) from alltypesorc
 where cint > 10 and cint < 10000 limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"351b08ec58591554ec10a6ded68ef25f","queryText":"select cint, rank() over(order by cint) from alltypesorc\nwhere cint > 10 and cint < 10000 limit 10","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3,4,2,5,6,7,8,9,10,11,12,13],"targets":[1],"expression":"(tok_function rank (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cint)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"((alltypesorc.cint > 10) and (alltypesorc.cint < 10000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.a
 lltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"351b08ec58591554ec10a6ded68ef25f","queryText":"select cint, rank() over(order by cint) from alltypesorc\nwhere cint > 10 and cint < 10000 limit 10","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3,4,2,5,6,7,8,9,10,11,12,13],"targets":[1],"expression":"(tok_function rank (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cint)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"((alltypesorc.cint > 10) and (alltypesorc.cint < 10000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN",
 "vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]}
 762	1
 762	1
 762	1
@@ -86,7 +86,7 @@ order by a.ctinyint, a.cint
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"40c3faa7abd1cdb7f12c1047a8a1d2ce","queryText":"select a.ctinyint, a.cint, count(a.cdouble)\n  over(partition by a.ctinyint order by a.cint desc\n    rows between 1 preceding and 1 following)\nfrom alltypesorc a inner join alltypesorc b on a.cint = b.cbigint\norder by a.ctinyint, a.cint","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[3,4,5,6],"targets":[2],"expression":"(tok_function count (. (tok_table_or_col $hdt$_0) cdouble) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) ctinyint)) (tok_orderby (tok_tabsortcolnamedesc (. (tok_table_or_col $hdt$_0) cint)))) (tok_windowrange (preceding 1) (following 1))))","edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":1,"vertexType":"CO
 LUMN","vertexId":"a.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"40c3faa7abd1cdb7f12c1047a8a1d2ce","queryText":"select a.ctinyint, a.cint, count(a.cdouble)\n  over(partition by a.ctinyint order by a.cint desc\n    rows between 1 preceding and 1 following)\nfrom alltypesorc a inner join alltypesorc b on a.cint = b.cbigint\norder by a.ctinyint, a.cint","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[3,4,5,6],"targets":[2],"expression":"(tok_function count (. (tok_table_or_col $hdt$_0) cdouble) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) ctinyint)) (tok_orderby (tok_tabsortcolnamedesc (. (tok_table_or_col $hdt$_0) cint)))) (tok_windowrange (preceding 1) (following 1))))","edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"i
 d":1,"vertexType":"COLUMN","vertexId":"a.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 PREHOOK: query: with v2 as
   (select cdouble, count(cint) over() a,
     sum(cint + cbigint) over(partition by cboolean1) b
@@ -97,7 +97,7 @@ order by cdouble, a, b limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a083a5322b6a83af6f614f299d0361e4","queryText":"with v2 as\n  (select cdouble, count(cint) over() a,\n    sum(cint + cbigint) over(partition by cboolean1) b\n    from (select * from alltypesorc) v1)\nselect cdouble, a, b, a + b, cdouble + a from v2\nwhere cdouble is not null\norder by cdouble, a, b limit 5","edges":[{"sources":[5],"targets":[0],"edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[1],"expression":"(tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[2],"expression":"(tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (
 tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[3],"expression":"((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647)))) + (tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647)))))","edgeType":"PROJECTION"},{"sources":[5,6,7,8,9,10,11,12,13,14,15,16],"targets":[4],"expressio
 n":"(alltypesorc.cdouble + UDFToDouble((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))))","edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3,4],"expression":"alltypesorc.cdouble is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cdouble"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"b"},{"id":3,"vertexType":"COLUMN","vertexId":"c3"},{"id":4,"vertexType":"COLUMN","vertexId":"c4"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltype
 sorc.cbigint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":14,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":15,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":16,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a083a5322b6a83af6f614f299d0361e4","queryText":"with v2 as\n  (select cdouble, count(cint) over() a,\n    sum(cint + cbigint) over(partition by cboolean1) b\n    from (select * from alltypesorc) v1)\nselect cdouble, a, b, a + b, cdouble + a from v2\nwhere cdouble is not null\norder by cdouble, a, b limit 5","edges":[{"sources":[5],"targets":[0],"edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[1],"expression":"(tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[2],"expression":"(tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (
 tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[3],"expression":"((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647)))) + (tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647)))))","edgeType":"PROJECTION"},{"sources":[5,6,7,8,9,10,11,12,13,14,15,16],"ta
 rgets":[4],"expression":"(alltypesorc.cdouble + UDFToDouble((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))))","edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3,4],"expression":"alltypesorc.cdouble is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cdouble"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"b"},{"id":3,"vertexType":"COLUMN","vertexId":"c3"},{"id":4,"vertexType":"COLUMN","vertexId":"c4"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":9,"vertexType":"COLUMN","verte
 xId":"default.alltypesorc.cbigint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":14,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":15,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":16,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]}
 -16379.0	9173	-919551973060	-919551963887	-7206.0
 -16373.0	9173	-919551973060	-919551963887	-7200.0
 -16372.0	9173	-919551973060	-919551963887	-7199.0
@@ -116,7 +116,7 @@ order by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"afd760470fc5aa6d3e8348dee03af97f","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n  (select ctinyint, cbigint from alltypesorc\n   union all\n   select ctinyint, cbigint from alltypesorc) a\n  inner join\n  alltypesorc b\n  on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4],"targets":[0,1,2,3],"expression":"alltypesorc.cbigint is not null","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint < 100)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint = alltypesorc.ctinyint
 )","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"afd760470fc5aa6d3e8348dee03af97f","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n  (select ctinyint, cbigint from alltypesorc\n   union all\n   select ctinyint, cbigint from alltypesorc) a\n  inner join\n  alltypesorc b\n  on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4],"targets":[0,1,2,3],"expression":"alltypesorc.cbigint is not null","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint < 100)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint =
  alltypesorc.ctinyint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
 -2147311592	-51	-1071480828	-51
 -2147311592	-51	-1071480828	-51
 -2147311592	-51	-1067683781	-51
@@ -135,7 +135,7 @@ and x.ctinyint + length(c.cstring2) < 1000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3a12ad24b2622a8958df12d0bdc60f8a","queryText":"select x.ctinyint, x.cint, c.cbigint-100, c.cstring1\nfrom alltypesorc c\njoin (\n   select a.ctinyint ctinyint, b.cint cint\n   from (select * from alltypesorc a where cboolean1=false) a\n   join alltypesorc b on (a.cint = b.cbigint - 224870380)\n ) x on (x.cint = c.cint)\nwhere x.ctinyint > 10\nand x.cint < 4.5\nand x.ctinyint + length(c.cstring2) < 1000","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"expression":"(c.cbigint - UDFToLong(100))","edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3],"expression":"(UDFToDouble(c.cint) < 4.5)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(c.cint = c.cint)","edgeType":"PREDICATE"},{"sources":[6,5],"targets":[0,1,2,3],"expression":"((c.cbigint - UDFToLong(224870380)) =
  UDFToLong(c.cint))","edgeType":"PREDICATE"},{"sources":[8],"targets":[0,1,2,3],"expression":"(c.cboolean1 = false)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(c.ctinyint > 10)","edgeType":"PREDICATE"},{"sources":[4,9],"targets":[0,1,2,3],"expression":"((UDFToInteger(c.ctinyint) + length(c.cstring2)) < 1000)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"x.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"c.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":9,"vertexType":"COLUMN","vertexId":"default
 .alltypesorc.cstring2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3a12ad24b2622a8958df12d0bdc60f8a","queryText":"select x.ctinyint, x.cint, c.cbigint-100, c.cstring1\nfrom alltypesorc c\njoin (\n   select a.ctinyint ctinyint, b.cint cint\n   from (select * from alltypesorc a where cboolean1=false) a\n   join alltypesorc b on (a.cint = b.cbigint - 224870380)\n ) x on (x.cint = c.cint)\nwhere x.ctinyint > 10\nand x.cint < 4.5\nand x.ctinyint + length(c.cstring2) < 1000","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"expression":"(c.cbigint - UDFToLong(100))","edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3],"expression":"(UDFToDouble(c.cint) < 4.5)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(c.cint = c.cint)","edgeType":"PREDICATE"},{"sources":[6,5],"targets":[0,1,2,3],"expression":"((c.cbigint - UD
 FToLong(224870380)) = UDFToLong(c.cint))","edgeType":"PREDICATE"},{"sources":[8],"targets":[0,1,2,3],"expression":"(c.cboolean1 = false)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(c.ctinyint > 10)","edgeType":"PREDICATE"},{"sources":[4,9],"targets":[0,1,2,3],"expression":"((UDFToInteger(c.ctinyint) + length(c.cstring2)) < 1000)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"x.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"c.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":9,"vertexType":"COLUMN
 ","vertexId":"default.alltypesorc.cstring2"}]}
 11	-654374827	857266369	OEfPnHnIYueoup
 PREHOOK: query: select c1, x2, x3
 from (
@@ -158,7 +158,7 @@ order by x2, c1 desc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"bc64f8bec21631969a17930ec609cde9","queryText":"select c1, x2, x3\nfrom (\n  select c1, min(c2) x2, sum(c3) x3\n  from (\n    select c1, c2, c3\n    from (\n      select cint c1, ctinyint c2, min(cbigint) c3\n      from alltypesorc\n      where cint is not null\n      group by cint, ctinyint\n      order by cint, ctinyint\n      limit 5\n    ) x\n  ) x2\n  group by c1\n) y\nwhere x2 > 0\norder by x2, c1 desc","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"expression":"sum(min(default.alltypesorc.cbigint))","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1,2],"expression":"alltypesorc.cint is not null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2],"expression":"(min(default.alltypesorc.ctinyint) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c1"},
 {"id":1,"vertexType":"COLUMN","vertexId":"x2"},{"id":2,"vertexType":"COLUMN","vertexId":"x3"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bc64f8bec21631969a17930ec609cde9","queryText":"select c1, x2, x3\nfrom (\n  select c1, min(c2) x2, sum(c3) x3\n  from (\n    select c1, c2, c3\n    from (\n      select cint c1, ctinyint c2, min(cbigint) c3\n      from alltypesorc\n      where cint is not null\n      group by cint, ctinyint\n      order by cint, ctinyint\n      limit 5\n    ) x\n  ) x2\n  group by c1\n) y\nwhere x2 > 0\norder by x2, c1 desc","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"expression":"sum(min(default.alltypesorc.cbigint))","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1,2],"expression":"alltypesorc.cint is not null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2],"expression":"(min(default.alltypesorc.ctinyint) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLU
 MN","vertexId":"c1"},{"id":1,"vertexType":"COLUMN","vertexId":"x2"},{"id":2,"vertexType":"COLUMN","vertexId":"x3"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 -1072910839	11	2048385991
 -1073279343	11	-1595604468
 PREHOOK: query: select key, value from src1
@@ -166,7 +166,7 @@ where key in (select key+18 from src1) order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 273	val_273
 PREHOOK: query: select * from src1 a
@@ -178,7 +178,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 311	val_311
 Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select key, value from src1
@@ -186,7 +186,7 @@ where key not in (select key+18 from src1) order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"
 default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":
 "COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]}
 PREHOOK: query: select * from src1 a
 where not exists
   (select cint from alltypesorc b
@@ -196,7 +196,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src
 1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","v
 ertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 369	
 401	val_401
 406	val_406
@@ -205,7 +205,7 @@ select x, y from t where y > 'v' order by x, y limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"d40d80b93db06c12df9a6ccdc108a9d1","queryText":"with t as (select key x, value y from src1 where key > '2')\nselect x, y from t where y > 'v' order by x, y limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(src1.key > '2')","edgeType":"PREDICATE"},{"sources":[3],"targets":[0,1],"expression":"(src1.value > 'v')","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x"},{"id":1,"vertexType":"COLUMN","vertexId":"y"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"d40d80b93db06c12df9a6ccdc108a9d1","queryText":"with t as (select key x, value y from src1 where key > '2')\nselect x, y from t where y > 'v' order by x, y limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(src1.key > '2')","edgeType":"PREDICATE"},{"sources":[3],"targets":[0,1],"expression":"(src1.value > 'v')","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x"},{"id":1,"vertexType":"COLUMN","vertexId":"y"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 213	val_213
 238	val_238
 255	val_255
@@ -216,7 +216,7 @@ select x, y where y > 'v' order by x, y limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"9180b71a610dbcf5e636a3c03e48ca3b","queryText":"from (select key x, value y from src1 where key > '2') t\nselect x, y where y > 'v' order by x, y limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(src1.key > '2')","edgeType":"PREDICATE"},{"sources":[3],"targets":[0,1],"expression":"(src1.value > 'v')","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x"},{"id":1,"vertexType":"COLUMN","vertexId":"y"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"9180b71a610dbcf5e636a3c03e48ca3b","queryText":"from (select key x, value y from src1 where key > '2') t\nselect x, y where y > 'v' order by x, y limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(src1.key > '2')","edgeType":"PREDICATE"},{"sources":[3],"targets":[0,1],"expression":"(src1.value > 'v')","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x"},{"id":1,"vertexType":"COLUMN","vertexId":"y"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 213	val_213
 238	val_238
 255	val_255
@@ -230,13 +230,13 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v1
-{"version":"1.0","engine":"mr","hash":"a3b2d2665c90fd669400f247f751f081","queryText":"create view dest_v1 as\n  select ctinyint, cint from alltypesorc where ctinyint is not null","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v1.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a3b2d2665c90fd669400f247f751f081","queryText":"create view dest_v1 as\n  select ctinyint, cint from alltypesorc where ctinyint is not null","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v1.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
 PREHOOK: query: select * from dest_v1 order by ctinyint, cint limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@dest_v1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"75e07b246069a5541af4a3983500b439","queryText":"select * from dest_v1 order by ctinyint, cint limit 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v1.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"75e07b246069a5541af4a3983500b439","queryText":"select * from dest_v1 order by ctinyint, cint limit 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v1.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
 -64	NULL
 -64	NULL
 PREHOOK: query: alter view dest_v1 as select ctinyint from alltypesorc
@@ -244,14 +244,14 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v1
-{"version":"1.0","engine":"mr","hash":"bcab8b0c498b0d94e0967170956392b6","queryText":"alter view dest_v1 as select ctinyint from alltypesorc","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bcab8b0c498b0d94e0967170956392b6","queryText":"alter view dest_v1 as select ctinyint from alltypesorc","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 PREHOOK: query: select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t
 where ctinyint > 10 order by ctinyint limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@dest_v1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"b0192d4da86f4bef38fe7ab1fc607906","queryText":"select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t\nwhere ctinyint > 10 order by ctinyint limit 2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"},{"sources":[1],"targets":[0],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"},{"sources":[1],"targets":[0],"expression":"(alltypesorc.ctinyint > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"t.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"b0192d4da86f4bef38fe7ab1fc607906","queryText":"select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t\nwhere ctinyint > 10 order by ctinyint limit 2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"},{"sources":[1],"targets":[0],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"},{"sources":[1],"targets":[0],"expression":"(alltypesorc.ctinyint > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"t.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 11
 11
 PREHOOK: query: drop view if exists dest_v2
@@ -276,7 +276,7 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v2
-{"version":"1.0","engine":"mr","hash":"eda442b42b9c3a9cbdb7aff1984ad2dd","queryText":"create view dest_v2 (a, b) as select c1, x2\nfrom (\n  select c1, min(c2) x2\n  from (\n    select c1, c2, c3\n    from (\n      select cint c1, ctinyint c2, min(cfloat) c3\n      from alltypesorc\n      group by cint, ctinyint\n      order by cint, ctinyint\n      limit 1\n    ) x\n  ) x2\n  group by c1\n) y\norder by x2,c1 desc","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v2.c1"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v2.x2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"eda442b42b9c3a9cbdb7aff1984ad2dd","queryText":"create view dest_v2 (a, b) as select c1, x2\nfrom (\n  select c1, min(c2) x2\n  from (\n    select c1, c2, c3\n    from (\n      select cint c1, ctinyint c2, min(cfloat) c3\n      from alltypesorc\n      group by cint, ctinyint\n      order by cint, ctinyint\n      limit 1\n    ) x\n  ) x2\n  group by c1\n) y\norder by x2,c1 desc","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v2.c1"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v2.x2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 PREHOOK: query: drop view if exists dest_v3
 PREHOOK: type: DROPVIEW
 PREHOOK: query: create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as
@@ -297,7 +297,7 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v3
-{"version":"1.0","engine":"mr","hash":"a0c2481ce1c24895a43a950f93a10da7","queryText":"create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as\n  select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1\n  from alltypesorc c\n  join (\n     select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2,\n           a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint\n     from ( select * from alltypesorc a where cboolean1=true ) a\n     join alltypesorc b on (a.csmallint = b.cint)\n   ) x on (x.ctinyint = c.cbigint)\n  where x.csmallint=11\n  and x.cint > 899\n  and x.cfloat > 4.5\n  and c.cstring1 < '7'\n  and x.cint + x.cfloat + length(c.cstring1) < 1000","edges":[{"sources":[7],"targets":[0],"edgeType":"PROJECTION"},{"sources":[8],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[9],"targets":[3],"edgeType":"PROJECTION"},{"sources":[10],"targets":[4],"edgeType":"PROJECTION"},{"sources":[11],"targets":[5],"edgeType":"PROJ
 ECTION"},{"sources":[12],"targets":[6],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,3,2,4,5,6],"expression":"(c.cbigint = UDFToLong(x._col1))","edgeType":"PREDICATE"},{"sources":[13],"targets":[0,1,3,2,4,5,6],"expression":"(a.cboolean1 = true)","edgeType":"PREDICATE"},{"sources":[7,10],"targets":[0,1,3,2,4,5,6],"expression":"(UDFToInteger(a._col1) = b.cint)","edgeType":"PREDICATE"},{"sources":[7,10,11,12],"targets":[0,1,3,2,4,5,6],"expression":"((x.csmallint = 11) and (x.cint > 899) and (x.cfloat > 4.5) and (c.cstring1 < '7') and (((x.cint + x.cfloat) + length(c.cstring1)) < 1000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.csmallint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_v3.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_v3.cint"},{"id":5,"vertexType":"
 COLUMN","vertexId":"default.dest_v3.cfloat"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_v3.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a0c2481ce1c24895a43a950f93a10da7","queryText":"create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as\n  select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1\n  from alltypesorc c\n  join (\n     select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2,\n           a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint\n     from ( select * from alltypesorc a where cboolean1=true ) a\n     join alltypesorc b on (a.csmallint = b.cint)\n   ) x on (x.ctinyint = c.cbigint)\n  where x.csmallint=11\n  and x.cint > 899\n  and x.cfloat > 4.5\n  and c.cstring1 < '7'\n  and x.cint + x.cfloat + length(c.cstring1) < 1000","edges":[{"sources":[7],"targets":[0],"edgeType":"PROJECTION"},{"sources":[8],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[9],"targets":[3],"edgeType":"PROJECTION"},{"sources":[10],"targets":[4],"edgeType":"PROJECTION"},{"sources":[11],"targets"
 :[5],"edgeType":"PROJECTION"},{"sources":[12],"targets":[6],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,3,2,4,5,6],"expression":"(c.cbigint = UDFToLong(x._col1))","edgeType":"PREDICATE"},{"sources":[13],"targets":[0,1,3,2,4,5,6],"expression":"(a.cboolean1 = true)","edgeType":"PREDICATE"},{"sources":[7,10],"targets":[0,1,3,2,4,5,6],"expression":"(UDFToInteger(a._col1) = b.cint)","edgeType":"PREDICATE"},{"sources":[7,10,11,12],"targets":[0,1,3,2,4,5,6],"expression":"((x.csmallint = 11) and (x.cint > 899) and (x.cfloat > 4.5) and (c.cstring1 < '7') and (((x.cint + x.cfloat) + length(c.cstring1)) < 1000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.csmallint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_v3.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_v3.cint"},{
 "id":5,"vertexType":"COLUMN","vertexId":"default.dest_v3.cfloat"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_v3.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"}]}
 PREHOOK: query: alter view dest_v3 as
   select * from (
     select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,
@@ -311,13 +311,13 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v3
-{"version":"1.0","engine":"mr","hash":"949093880975cc807ad1a8003e8a8c7c","queryText":"alter view dest_v3 as\n  select * from (\n    select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,\n      count(b.cstring1) x, b.cboolean1\n    from alltypesorc a join alltypesorc b on (a.cint = b.cint)\n    where a.cboolean2 = true and b.cfloat > 0\n    group by a.ctinyint, a.csmallint, b.cboolean1\n    having count(a.cint) > 10\n    order by a, x, b.cboolean1 limit 10) t","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col a) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col a) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col a) csmallint))))))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[0,1,2],"expressio
 n":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,2],"expression":"((a.cboolean2 = true) and (b.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"949093880975cc807ad1a8003e8a8c7c","queryText":"alter view dest_v3 as\n  select * from (\n    select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,\n      count(b.cstring1) x, b.cboolean1\n    from alltypesorc a join alltypesorc b on (a.cint = b.cint)\n    where a.cboolean2 = true and b.cfloat > 0\n    group by a.ctinyint, a.csmallint, b.cboolean1\n    having count(a.cint) > 10\n    order by a, x, b.cboolean1 limit 10) t","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col a) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col a) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col a) csmallint))))))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"target
 s":[0,1,2],"expression":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,2],"expression":"((a.cboolean2 = true) and (b.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltyp
 esorc.cfloat"}]}
 PREHOOK: query: select * from dest_v3 limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@dest_v3
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) csmallint)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2],"expression":"(a.cboolean2 = true)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[9],"targets":[0,1,2],"expression":"(a.cfloat > 0.0)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.c
 int) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) csmallint)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2],"expression":"(a.cboolean2 = true)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[9],"targets":[0,1,2],"expression":"(a.cfloat > 0.0)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(
 default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
 38	216	false
 38	229	true
 PREHOOK: query: drop table if exists src_dp
@@ -348,22 +348,22 @@ PREHOOK: query: insert into dest_dp1 partition (year) select first, word, year f
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_dp
 PREHOOK: Output: default@dest_dp1
-{"version":"1.0","engine":"mr","hash":"b2d38401a3281e74a003d9650df97060","queryText":"insert into dest_dp1 partition (year) select first, word, year from src_dp","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"b2d38401a3281e74a003d9650df97060","queryText":"insert into dest_dp1 partition (year) select first, word, year from src_dp","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
 PREHOOK: query: insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_dp
 PREHOOK: Output: default@dest_dp2
-{"version":"1.0","engine":"mr","hash":"237302d8ffd62b5b71d9544b22de7770","queryText":"insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src_dp.month"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"237302d8ffd62b5b71d9544b22de7770","queryText":"insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src_dp.month"}]}
 PREHOOK: query: insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_dp
 PREHOOK: Output: default@dest_dp2@y=0
-{"version":"1.0","engine":"mr","hash":"63e990b47e7ab4eb6f2ea09dfb7453ff","queryText":"insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[6],"targets":[0,1,2],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"63e990b47e7ab4eb6f2ea09dfb7453ff","queryText":"insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[6],"targets":[0,1,2],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
 PREHOOK: query: insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_dp
 PREHOOK: Output: default@dest_dp3@y=0
-{"version":"1.0","engine":"mr","hash":"6bf71a9d02c0612c63b6f40b15c1e8b3","queryText":"insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"id":7,"vertexType":"CO
 LUMN","vertexId":"default.src_dp.day"},{"id":8,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"6bf71a9d02c0612c63b6f40b15c1e8b3","queryText":"insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"i
 d":7,"vertexType":"COLUMN","vertexId":"default.src_dp.day"},{"id":8,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
 PREHOOK: query: drop table if exists src_dp1
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table src_dp1 (f string, w string, m int)
@@ -385,4 +385,4 @@ PREHOOK: Output: default@dest_dp1@year=0
 PREHOOK: Output: default@dest_dp2
 PREHOOK: Output: default@dest_dp2@y=1
 PREHOOK: Output: default@dest_dp3@y=2
-{"version":"1.0","engine":"mr","hash":"44f16edbf35cfeaf3d4f7b0113a69b74","queryText":"from src_dp, src_dp1\ninsert into dest_dp1 partition (year) select first, word, year\ninsert into dest_dp2 partition (y, m) select first, word, year, month\ninsert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d where year=2\ninsert into dest_dp2 partition (y=1, m) select f, w, m\ninsert into dest_dp1 partition (year=0) select f, w","edges":[{"sources":[11],"targets":[0,1,2],"edgeType":"PROJECTION"},{"sources":[12],"targets":[3,4,5],"edgeType":"PROJECTION"},{"sources":[13],"targets":[6,7],"edgeType":"PROJECTION"},{"sources":[14],"targets":[8,9],"edgeType":"PROJECTION"},{"sources":[15],"targets":[1,0],"edgeType":"PROJECTION"},{"sources":[16],"targets":[4,3],"edgeType":"PROJECTION"},{"sources":[17],"targets":[8],"edgeType":"PROJECTION"},{"sources":[18],"targets":[10],"edgeType":"PROJECTION"},{"sources":[13],"targets":[2,5,9,10],"expression":"(src_dp.year = 2)","edgeType":"PREDI
 CATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":11,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":12,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":13,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":14,"vertexType":"COLUMN","vertexId":"default.src_dp.month
 "},{"id":15,"vertexType":"COLUMN","vertexId":"default.src_dp1.f"},{"id":16,"vertexType":"COLUMN","vertexId":"default.src_dp1.w"},{"id":17,"vertexType":"COLUMN","vertexId":"default.src_dp1.m"},{"id":18,"vertexType":"COLUMN","vertexId":"default.src_dp.day"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"44f16edbf35cfeaf3d4f7b0113a69b74","queryText":"from src_dp, src_dp1\ninsert into dest_dp1 partition (year) select first, word, year\ninsert into dest_dp2 partition (y, m) select first, word, year, month\ninsert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d where year=2\ninsert into dest_dp2 partition (y=1, m) select f, w, m\ninsert into dest_dp1 partition (year=0) select f, w","edges":[{"sources":[11],"targets":[0,1,2],"edgeType":"PROJECTION"},{"sources":[12],"targets":[3,4,5],"edgeType":"PROJECTION"},{"sources":[13],"targets":[6,7],"edgeType":"PROJECTION"},{"sources":[14],"targets":[8,9],"edgeType":"PROJECTION"},{"sources":[15],"targets":[1,0],"edgeType":"PROJECTION"},{"sources":[16],"targets":[4,3],"edgeType":"PROJECTION"},{"sources":[17],"targets":[8],"edgeType":"PROJECTION"},{"sources":[18],"targets":[10],"edgeType":"PROJECTION"},{"sources":[13],"targets":[2,5,9,10],"expression":"(src_dp.year = 
 2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":11,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":12,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":13,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":14,"vertexType":"COLUMN","vertexId":
 "default.src_dp.month"},{"id":15,"vertexType":"COLUMN","vertexId":"default.src_dp1.f"},{"id":16,"vertexType":"COLUMN","vertexId":"default.src_dp1.w"},{"id":17,"vertexType":"COLUMN","vertexId":"default.src_dp1.m"},{"id":18,"vertexType":"COLUMN","vertexId":"default.src_dp.day"}]}


[06/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by xu...@apache.org.
HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/13f8cfec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/13f8cfec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/13f8cfec

Branch: refs/heads/spark
Commit: 13f8cfece819bec7ea045db92c18c6d7fc3784d0
Parents: 318c2ef
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Tue Nov 3 19:41:17 2015 -0800
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Tue Nov 3 19:41:17 2015 -0800
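
In short, this patch makes decimal output honor the declared scale of the column by padding values with trailing zeros, as the updated .q.out expectations below show (for example, a decimal(14,4) column now renders 77.341 as 77.3410 and 0 as 0.0000). A minimal, JDK-only sketch of the padding rule follows; it is illustrative only, not Hive's actual implementation, which threads the scale through HiveDecimal and the serde writers touched by this commit:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class PadDecimalSketch {
    // Pad a decimal value with trailing zeros up to the column's declared
    // scale, mirroring the expected-output change in this patch.
    static String padToScale(String value, int columnScale) {
        // Extending the scale never rounds; RoundingMode.UNNECESSARY throws
        // if the value already has more fractional digits than the column
        // allows, which is the safe behavior for a display-padding rule.
        return new BigDecimal(value)
                .setScale(columnScale, RoundingMode.UNNECESSARY)
                .toPlainString();
    }

    public static void main(String[] args) {
        System.out.println(padToScale("77.341", 4)); // 77.3410
        System.out.println(padToScale("0", 4));      // 0.0000
        System.out.println(padToScale("-12.25", 4)); // -12.2500
    }
}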

----------------------------------------------------------------------
 .../hive/ql/exec/vector/VectorSerializeRow.java |   4 +-
 .../expressions/FilterStructColumnInList.java   |   3 +-
 .../vector/expressions/StructColumnInList.java  |   3 +-
 .../hive/ql/exec/vector/TestVectorSerDeRow.java |  19 +-
 .../test/results/clientpositive/acid_join.q.out |   2 +-
 .../alter_partition_change_col.q.out            | 240 ++---
 .../clientpositive/alter_table_cascade.q.out    |  40 +-
 .../clientpositive/ansi_sql_arithmetic.q.out    |   2 +-
 .../results/clientpositive/avro_decimal.q.out   |  10 +-
 .../clientpositive/avro_decimal_native.q.out    |  10 +-
 .../clientpositive/cast_qualified_types.q.out   |   2 +-
 .../results/clientpositive/decimal_1_1.q.out    |  48 +-
 .../test/results/clientpositive/decimal_3.q.out | 514 +++++-----
 .../test/results/clientpositive/decimal_4.q.out | 144 +--
 .../test/results/clientpositive/decimal_5.q.out | 180 ++--
 .../test/results/clientpositive/decimal_6.q.out |  92 +-
 .../results/clientpositive/decimal_join2.q.out  | 260 ++---
 .../clientpositive/decimal_precision.q.out      | 170 ++--
 .../clientpositive/decimal_trailing.q.out       |  42 +-
 .../results/clientpositive/decimal_udf.q.out    | 960 +++++++++----------
 .../insert_nonacid_from_acid.q.out              |  20 +-
 .../llap/hybridgrace_hashjoin_1.q.out           | 204 ++--
 .../clientpositive/llap/mapjoin_decimal.q.out   | 424 ++++----
 .../results/clientpositive/orc_file_dump.q.out  |   6 +-
 .../clientpositive/orc_predicate_pushdown.q.out |   4 +-
 .../clientpositive/parquet_decimal.q.out        |  16 +-
 .../clientpositive/parquet_ppd_boolean.q.out    | 180 ++--
 .../clientpositive/parquet_ppd_char.q.out       | 220 ++---
 .../clientpositive/parquet_ppd_date.q.out       | 330 +++----
 .../clientpositive/parquet_ppd_decimal.q.out    | 660 ++++++-------
 .../clientpositive/parquet_ppd_timestamp.q.out  | 320 +++----
 .../clientpositive/parquet_ppd_varchar.q.out    | 220 ++---
 .../parquet_predicate_pushdown.q.out            |   4 +-
 .../results/clientpositive/serde_regex.q.out    |  74 +-
 .../spark/avro_decimal_native.q.out             |  10 +-
 .../clientpositive/spark/decimal_1_1.q.out      |  48 +-
 .../clientpositive/spark/mapjoin_decimal.q.out  | 424 ++++----
 .../spark/vector_between_in.q.out               |  14 +-
 .../spark/vector_cast_constant.q.java1.7.out    |  20 +-
 .../spark/vector_data_types.q.out               |   4 +-
 .../spark/vector_decimal_aggregate.q.out        |  32 +-
 .../spark/vector_decimal_mapjoin.q.out          | 212 ++--
 .../clientpositive/sum_expr_with_order.q.out    |   2 +-
 .../tez/hybridgrace_hashjoin_1.q.out            | 204 ++--
 .../clientpositive/tez/mapjoin_decimal.q.out    | 424 ++++----
 .../clientpositive/tez/update_all_types.q.out   |  30 +-
 .../clientpositive/tez/vector_aggregate_9.q.out |   2 +-
 .../clientpositive/tez/vector_between_in.q.out  |  14 +-
 .../tez/vector_cast_constant.q.java1.7.out      |  20 +-
 .../clientpositive/tez/vector_data_types.q.out  |   4 +-
 .../clientpositive/tez/vector_decimal_2.q.out   |   4 +-
 .../clientpositive/tez/vector_decimal_3.q.out   | 514 +++++-----
 .../clientpositive/tez/vector_decimal_4.q.out   | 288 +++---
 .../clientpositive/tez/vector_decimal_5.q.out   | 180 ++--
 .../clientpositive/tez/vector_decimal_6.q.out   | 172 ++--
 .../tez/vector_decimal_aggregate.q.out          |  32 +-
 .../tez/vector_decimal_cast.q.out               |  20 +-
 .../tez/vector_decimal_expressions.q.out        |  20 +-
 .../tez/vector_decimal_mapjoin.q.out            | 212 ++--
 .../tez/vector_decimal_precision.q.out          | 170 ++--
 .../tez/vector_decimal_round_2.q.out            |  14 +-
 .../tez/vector_decimal_trailing.q.out           |  42 +-
 .../clientpositive/tez/vector_decimal_udf.q.out | 960 +++++++++----------
 .../tez/vector_reduce_groupby_decimal.q.out     |  98 +-
 .../clientpositive/update_all_types.q.out       |  30 +-
 .../clientpositive/vector_aggregate_9.q.out     |   2 +-
 .../clientpositive/vector_between_in.q.out      |  14 +-
 .../vector_cast_constant.q.java1.7.out          |  20 +-
 .../clientpositive/vector_data_types.q.out      |   4 +-
 .../clientpositive/vector_decimal_2.q.out       |   4 +-
 .../clientpositive/vector_decimal_3.q.out       | 514 +++++-----
 .../clientpositive/vector_decimal_4.q.out       | 288 +++---
 .../clientpositive/vector_decimal_5.q.out       | 180 ++--
 .../clientpositive/vector_decimal_6.q.out       | 172 ++--
 .../vector_decimal_aggregate.q.out              |  32 +-
 .../clientpositive/vector_decimal_cast.q.out    |  20 +-
 .../vector_decimal_expressions.q.out            |  20 +-
 .../clientpositive/vector_decimal_mapjoin.q.out | 212 ++--
 .../vector_decimal_precision.q.out              | 170 ++--
 .../clientpositive/vector_decimal_round_2.q.out |  14 +-
 .../vector_decimal_trailing.q.out               |  42 +-
 .../clientpositive/vector_decimal_udf.q.out     | 960 +++++++++----------
 .../vector_reduce_groupby_decimal.q.out         |  98 +-
 .../clientpositive/windowing_decimal.q.out      | 104 +-
 .../clientpositive/windowing_navfn.q.out        |  20 +-
 .../results/clientpositive/windowing_rank.q.out |  60 +-
 .../clientpositive/windowing_windowspec3.q.out  |  18 +-
 .../fast/BinarySortableSerializeWrite.java      |   2 +-
 .../hadoop/hive/serde2/fast/SerializeWrite.java |   2 +-
 .../hive/serde2/lazy/LazyHiveDecimal.java       |   4 +-
 .../hadoop/hive/serde2/lazy/LazyUtils.java      |   3 +-
 .../lazy/fast/LazySimpleSerializeWrite.java     |  14 +-
 .../fast/LazyBinarySerializeWrite.java          |   2 +-
 .../apache/hadoop/hive/serde2/VerifyFast.java   |   9 +-
 .../binarysortable/TestBinarySortableFast.java  |   3 +-
 .../hive/serde2/lazy/TestLazySimpleFast.java    |   3 +-
 .../serde2/lazybinary/TestLazyBinaryFast.java   |   3 +-
 .../hadoop/hive/common/type/HiveDecimal.java    |  11 +
 .../ql/exec/vector/DecimalColumnVector.java     |   2 -
 99 files changed, 6331 insertions(+), 6342 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java
index fe889b5..c98c260 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java
@@ -491,7 +491,7 @@ public final class VectorSerializeRow<T extends SerializeWrite> {
 
       if (colVector.isRepeating) {
         if (colVector.noNulls || !colVector.isNull[0]) {
-          serializeWrite.writeHiveDecimal(colVector.vector[0].getHiveDecimal());
+          serializeWrite.writeHiveDecimal(colVector.vector[0].getHiveDecimal(), colVector.scale);
           return true;
         } else {
           serializeWrite.writeNull();
@@ -499,7 +499,7 @@ public final class VectorSerializeRow<T extends SerializeWrite> {
         }
       } else {
         if (colVector.noNulls || !colVector.isNull[batchIndex]) {
-          serializeWrite.writeHiveDecimal(colVector.vector[batchIndex].getHiveDecimal());
+          serializeWrite.writeHiveDecimal(colVector.vector[batchIndex].getHiveDecimal(), colVector.scale);
           return true;
         } else {
           serializeWrite.writeNull();
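
Note: the two vectorized IN-list expressions below receive the same treatment as VectorSerializeRow above. In each case the writeHiveDecimal call now passes the DecimalColumnVector's scale alongside the HiveDecimal value, so the binary-sortable writer can pad the serialized form to the column's declared scale.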

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
index 00f22bb..70b393c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
@@ -110,8 +110,9 @@ public class FilterStructColumnInList extends FilterStringColumnInList implement
             break;
 
           case DECIMAL:
+            DecimalColumnVector decColVector = ((DecimalColumnVector) colVec);
             binarySortableSerializeWrite.writeHiveDecimal(
-                ((DecimalColumnVector) colVec).vector[adjustedIndex].getHiveDecimal());
+                decColVector.vector[adjustedIndex].getHiveDecimal(), decColVector.scale);
             break;
 
           default:

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
index 724497a..769c70a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
@@ -111,8 +111,9 @@ public class StructColumnInList extends StringColumnInList implements IStructInE
             break;
 
           case DECIMAL:
+            DecimalColumnVector decColVector = ((DecimalColumnVector) colVec);
             binarySortableSerializeWrite.writeHiveDecimal(
-                ((DecimalColumnVector) colVec).vector[adjustedIndex].getHiveDecimal());
+                decColVector.vector[adjustedIndex].getHiveDecimal(), decColVector.scale);
             break;
 
           default:

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java
index 23e44f0..eaff732 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java
@@ -57,21 +57,10 @@ import org.apache.hadoop.hive.serde2.lazy.fast.LazySimpleSerializeWrite;
 import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinaryDeserializeRead;
 import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinarySerializeWrite;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableByteObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableDateObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableDoubleObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableFloatObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableIntObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableShortObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.hive.serde2.fast.SerializeWrite;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.BytesWritable;
@@ -415,10 +404,10 @@ public class TestVectorSerDeRow extends TestCase {
   private Output serializeRow(Object[] row, RandomRowObjectSource source, SerializeWrite serializeWrite) throws HiveException, IOException {
     Output output = new Output();
     serializeWrite.set(output);
-    PrimitiveCategory[] primitiveCategories = source.primitiveCategories();
-    for (int i = 0; i < primitiveCategories.length; i++) {
+    PrimitiveTypeInfo[] primitiveTypeInfos = source.primitiveTypeInfos();
+    for (int i = 0; i < primitiveTypeInfos.length; i++) {
       Object object = row[i];
-      PrimitiveCategory primitiveCategory = primitiveCategories[i];
+      PrimitiveCategory primitiveCategory = primitiveTypeInfos[i].getPrimitiveCategory();
       switch (primitiveCategory) {
       case BOOLEAN:
         {
@@ -529,7 +518,7 @@ public class TestVectorSerDeRow extends TestCase {
         {
           HiveDecimalWritable expectedWritable = (HiveDecimalWritable) object;
           HiveDecimal value = expectedWritable.getHiveDecimal();
-          serializeWrite.writeHiveDecimal(value);
+          serializeWrite.writeHiveDecimal(value, ((DecimalTypeInfo)primitiveTypeInfos[i]).scale());
         }
         break;
       default:
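
Note: the test change above follows directly from the new writeHiveDecimal(value, scale) signature. A PrimitiveCategory alone carries no scale, so TestVectorSerDeRow now keeps the full PrimitiveTypeInfo array and, in the DECIMAL case, extracts the scale from the DecimalTypeInfo before serializing.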

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/acid_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_join.q.out b/ql/src/test/results/clientpositive/acid_join.q.out
index a1edb89..fcc7d75 100644
--- a/ql/src/test/results/clientpositive/acid_join.q.out
+++ b/ql/src/test/results/clientpositive/acid_join.q.out
@@ -65,7 +65,7 @@ POSTHOOK: query: select * from acidjoin3 order by name
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acidjoin3
 #### A masked pattern was here ####
-aaa	35	3
+aaa	35	3.00
 bbb	32	3.01
 ccc	32	3.02
 ddd	35	3.03

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
index 50520aa..9e397c1 100644
--- a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
@@ -213,16 +213,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77	abc	123
-Beck	80	abc	123
-Cluck	6	abc	123
-Mary	33	abc	123
-Mary	4	abc	123
-Snow	56	abc	123
-Tom	-12	abc	123
-Tom	19	abc	123
-Tom	235	abc	123
+Beck	0.0000	abc	123
+Beck	77.0000	abc	123
+Beck	80.0000	abc	123
+Cluck	6.0000	abc	123
+Mary	33.0000	abc	123
+Mary	4.0000	abc	123
+Snow	56.0000	abc	123
+Tom	-12.0000	abc	123
+Tom	19.0000	abc	123
+Tom	235.0000	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -233,16 +233,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77	__HIVE_DEFAULT_PARTITION__	123
-Beck	80	__HIVE_DEFAULT_PARTITION__	123
-Cluck	6	__HIVE_DEFAULT_PARTITION__	123
-Mary	33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4	__HIVE_DEFAULT_PARTITION__	123
-Snow	56	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	235	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	80.0000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	6.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.0000	__HIVE_DEFAULT_PARTITION__	123
+Snow	56.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	235.0000	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- now change the column type of the existing partition
 alter table alter_partition_change_col1 partition (p1='abc', p2='123') change c2 c2 decimal(14,4)
 PREHOOK: type: ALTERTABLE_RENAMECOL
@@ -280,16 +280,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77.341	abc	123
-Beck	79.9	abc	123
-Cluck	5.96	abc	123
-Mary	33.33	abc	123
-Mary	4.329	abc	123
-Snow	55.71	abc	123
-Tom	-12.25	abc	123
-Tom	19	abc	123
-Tom	234.79	abc	123
+Beck	0.0000	abc	123
+Beck	77.3410	abc	123
+Beck	79.9000	abc	123
+Cluck	5.9600	abc	123
+Mary	33.3300	abc	123
+Mary	4.3290	abc	123
+Snow	55.7100	abc	123
+Tom	-12.2500	abc	123
+Tom	19.0000	abc	123
+Tom	234.7900	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -300,16 +300,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77	__HIVE_DEFAULT_PARTITION__	123
-Beck	80	__HIVE_DEFAULT_PARTITION__	123
-Cluck	6	__HIVE_DEFAULT_PARTITION__	123
-Mary	33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4	__HIVE_DEFAULT_PARTITION__	123
-Snow	56	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	235	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	80.0000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	6.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.0000	__HIVE_DEFAULT_PARTITION__	123
+Snow	56.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	235.0000	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- change column for default partition value
 alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') change c2 c2 decimal(14,4)
 PREHOOK: type: ALTERTABLE_RENAMECOL
@@ -347,16 +347,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77.341	abc	123
-Beck	79.9	abc	123
-Cluck	5.96	abc	123
-Mary	33.33	abc	123
-Mary	4.329	abc	123
-Snow	55.71	abc	123
-Tom	-12.25	abc	123
-Tom	19	abc	123
-Tom	234.79	abc	123
+Beck	0.0000	abc	123
+Beck	77.3410	abc	123
+Beck	79.9000	abc	123
+Cluck	5.9600	abc	123
+Mary	33.3300	abc	123
+Mary	4.3290	abc	123
+Snow	55.7100	abc	123
+Tom	-12.2500	abc	123
+Tom	19.0000	abc	123
+Tom	234.7900	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -367,16 +367,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- Try out replace columns
 alter table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
@@ -449,16 +449,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: alter table alter_partition_change_col1 replace columns (c1 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@alter_partition_change_col1
@@ -593,16 +593,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: alter table alter_partition_change_col1 partition (p1='abc', p2='123') add columns (c2 decimal(14,4))
 PREHOOK: type: ALTERTABLE_ADDCOLS
 PREHOOK: Input: default@alter_partition_change_col1
@@ -638,16 +638,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77.341	abc	123
-Beck	79.9	abc	123
-Cluck	5.96	abc	123
-Mary	33.33	abc	123
-Mary	4.329	abc	123
-Snow	55.71	abc	123
-Tom	-12.25	abc	123
-Tom	19	abc	123
-Tom	234.79	abc	123
+Beck	0.0000	abc	123
+Beck	77.3410	abc	123
+Beck	79.9000	abc	123
+Cluck	5.9600	abc	123
+Mary	33.3300	abc	123
+Mary	4.3290	abc	123
+Snow	55.7100	abc	123
+Tom	-12.2500	abc	123
+Tom	19.0000	abc	123
+Tom	234.7900	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -658,16 +658,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- Try changing column for all partitions at once
 alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0)
 PREHOOK: type: ALTERTABLE_RENAMECOL
@@ -724,16 +724,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77	abc	123
-Beck	80	abc	123
-Cluck	6	abc	123
-Mary	33	abc	123
-Mary	4	abc	123
-Snow	56	abc	123
-Tom	-12	abc	123
-Tom	19	abc	123
-Tom	235	abc	123
+Beck	0.0000	abc	123
+Beck	77.0000	abc	123
+Beck	80.0000	abc	123
+Cluck	6.0000	abc	123
+Mary	33.0000	abc	123
+Mary	4.0000	abc	123
+Snow	56.0000	abc	123
+Tom	-12.0000	abc	123
+Tom	19.0000	abc	123
+Tom	235.0000	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -744,13 +744,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77	__HIVE_DEFAULT_PARTITION__	123
-Beck	80	__HIVE_DEFAULT_PARTITION__	123
-Cluck	6	__HIVE_DEFAULT_PARTITION__	123
-Mary	33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4	__HIVE_DEFAULT_PARTITION__	123
-Snow	56	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	235	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	80.0000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	6.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.0000	__HIVE_DEFAULT_PARTITION__	123
+Snow	56.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	235.0000	__HIVE_DEFAULT_PARTITION__	123

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/alter_table_cascade.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_cascade.q.out b/ql/src/test/results/clientpositive/alter_table_cascade.q.out
index 3bf1a43..1d8204c 100644
--- a/ql/src/test/results/clientpositive/alter_table_cascade.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_cascade.q.out
@@ -833,16 +833,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_table_cascade
 POSTHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
 #### A masked pattern was here ####
-Beck	0	xyz	123
-Beck	77.341	xyz	123
-Beck	79.9	xyz	123
-Cluck	5.96	xyz	123
-Mary	33.33	xyz	123
-Mary	4.329	xyz	123
-Snow	55.71	xyz	123
-Tom	-12.25	xyz	123
-Tom	19	xyz	123
-Tom	234.79	xyz	123
+Beck	0.0000	xyz	123
+Beck	77.3410	xyz	123
+Beck	79.9000	xyz	123
+Cluck	5.9600	xyz	123
+Mary	33.3300	xyz	123
+Mary	4.3290	xyz	123
+Snow	55.7100	xyz	123
+Tom	-12.2500	xyz	123
+Tom	19.0000	xyz	123
+Tom	234.7900	xyz	123
 PREHOOK: query: select * from alter_table_cascade where p1='abc'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_table_cascade
@@ -873,16 +873,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_table_cascade
 POSTHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- 
 
 drop table if exists alter_table_restrict

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out b/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out
index 5e5a2f6..021c4ee 100644
--- a/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out
@@ -44,7 +44,7 @@ POSTHOOK: query: select cast(key as int) / cast(key as int) from src limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-1
+1.00000000000
 PREHOOK: query: -- With ansi sql arithmetic disabled, int / int => double
 explain select cast(key as int) / cast(key as int) from src limit 1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/avro_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_decimal.q.out b/ql/src/test/results/clientpositive/avro_decimal.q.out
index 7ba376e..64e65ca 100644
--- a/ql/src/test/results/clientpositive/avro_decimal.q.out
+++ b/ql/src/test/results/clientpositive/avro_decimal.q.out
@@ -106,9 +106,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19
-Beck	0
-Beck	79.9
+Tom	19.00
+Beck	0.00
+Beck	79.90
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1
@@ -175,10 +175,10 @@ POSTHOOK: Input: default@avro_dec1
 77.3
 55.7
 4.3
-6
+6.0
 12.3
 33.3
-19
+19.0
 3.2
 79.9
 PREHOOK: query: DROP TABLE dec

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/avro_decimal_native.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_decimal_native.q.out b/ql/src/test/results/clientpositive/avro_decimal_native.q.out
index 318be3d..cebc342 100644
--- a/ql/src/test/results/clientpositive/avro_decimal_native.q.out
+++ b/ql/src/test/results/clientpositive/avro_decimal_native.q.out
@@ -92,9 +92,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19
-Beck	0
-Beck	79.9
+Tom	19.00
+Beck	0.00
+Beck	79.90
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1
@@ -143,10 +143,10 @@ POSTHOOK: Input: default@avro_dec1
 77.3
 55.7
 4.3
-6
+6.0
 12.3
 33.3
-19
+19.0
 3.2
 79.9
 PREHOOK: query: DROP TABLE dec

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/cast_qualified_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cast_qualified_types.q.out b/ql/src/test/results/clientpositive/cast_qualified_types.q.out
index 1924c5d..099a199 100644
--- a/ql/src/test/results/clientpositive/cast_qualified_types.q.out
+++ b/ql/src/test/results/clientpositive/cast_qualified_types.q.out
@@ -18,4 +18,4 @@ limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-0	0         	0
+0.00	0         	0

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_1_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_1_1.q.out b/ql/src/test/results/clientpositive/decimal_1_1.q.out
index b2704c6..46fbeb7 100644
--- a/ql/src/test/results/clientpositive/decimal_1_1.q.out
+++ b/ql/src/test/results/clientpositive/decimal_1_1.q.out
@@ -26,9 +26,9 @@ POSTHOOK: query: select * from decimal_1_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_1_1
 #### A masked pattern was here ####
-0
-0
-0
+0.0
+0.0
+0.0
 0.1
 0.2
 0.9
@@ -37,13 +37,13 @@ NULL
 0.3
 NULL
 NULL
-0
-0
+0.0
+0.0
 NULL
-0
-0
-0
-0
+0.0
+0.0
+0.0
+0.0
 -0.1
 -0.2
 -0.9
@@ -52,10 +52,10 @@ NULL
 -0.3
 NULL
 NULL
-0
-0
+0.0
+0.0
 NULL
-0
+0.0
 PREHOOK: query: select d from decimal_1_1 order by d desc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_1_1
@@ -69,18 +69,18 @@ POSTHOOK: Input: default@decimal_1_1
 0.3
 0.2
 0.1
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
 -0.1
 -0.2
 -0.3

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_3.q.out b/ql/src/test/results/clientpositive/decimal_3.q.out
index 8e9a30a..3ded9a7 100644
--- a/ql/src/test/results/clientpositive/decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/decimal_3.q.out
@@ -33,43 +33,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -78,43 +78,43 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-1234567890.12345678	1234567890
-200	200
-125.2	125
-124	124
-100	100
-20	20
-10	10
-3.14	4
-3.14	3
-3.14	3
-3.14	3
-2	2
-2	2
-1.122	1
-1.12	1
-1	1
-1	1
-1	1
-0.333	0
-0.33	0
-0.3	0
-0.2	0
-0.1	0
-0.02	0
-0.01	0
-0	0
-0	0
-0	0
--0.3	0
--0.33	0
--0.333	0
--1.12	-1
--1.12	-1
--1.122	-11
--1255.49	-1255
--4400	4400
--1234567890.123456789	-1234567890
+1234567890.123456780000000000	1234567890
+200.000000000000000000	200
+125.200000000000000000	125
+124.000000000000000000	124
+100.000000000000000000	100
+20.000000000000000000	20
+10.000000000000000000	10
+3.140000000000000000	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+2.000000000000000000	2
+2.000000000000000000	2
+1.122000000000000000	1
+1.120000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+0.333000000000000000	0
+0.330000000000000000	0
+0.300000000000000000	0
+0.200000000000000000	0
+0.100000000000000000	0
+0.020000000000000000	0
+0.010000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+-0.300000000000000000	0
+-0.330000000000000000	0
+-0.333000000000000000	0
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-1.122000000000000000	-11
+-1255.490000000000000000	-1255
+-4400.000000000000000000	4400
+-1234567890.123456789000000000	-1234567890
 NULL	0
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -125,43 +125,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -171,34 +171,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL
--1234567890.123456789
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
-1234567890.12345678
+-1234567890.123456789000000000
+-4400.000000000000000000
+-1255.490000000000000000
+-1.122000000000000000
+-1.120000000000000000
+-0.333000000000000000
+-0.330000000000000000
+-0.300000000000000000
+0.000000000000000000
+0.010000000000000000
+0.020000000000000000
+0.100000000000000000
+0.200000000000000000
+0.300000000000000000
+0.330000000000000000
+0.333000000000000000
+1.000000000000000000
+1.120000000000000000
+1.122000000000000000
+2.000000000000000000
+3.140000000000000000
+10.000000000000000000
+20.000000000000000000
+100.000000000000000000
+124.000000000000000000
+125.200000000000000000
+200.000000000000000000
+1234567890.123456780000000000
 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -208,34 +208,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-2
--0.333	0
--0.33	0
--0.3	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	3
-1.12	1
-1.122	1
-2	4
-3.14	13
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-2
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	3
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	4
+3.140000000000000000	13
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -244,23 +244,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789
--1255	-1255.49
--11	-1.122
--1	-2.24
-0	0.33
-1	5.242
-2	4
-3	9.42
-4	3.14
-10	10
-20	20
-100	100
-124	124
-125	125.2
-200	200
-4400	-4400
-1234567890	1234567890.12345678
+-1234567890	-1234567890.123456789000000000
+-1255	-1255.490000000000000000
+-11	-1.122000000000000000
+-1	-2.240000000000000000
+0	0.330000000000000000
+1	5.242000000000000000
+2	4.000000000000000000
+3	9.420000000000000000
+4	3.140000000000000000
+10	10.000000000000000000
+20	20.000000000000000000
+100	100.000000000000000000
+124	124.000000000000000000
+125	125.200000000000000000
+200	200.000000000000000000
+4400	-4400.000000000000000000
+1234567890	1234567890.123456780000000000
 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -269,71 +269,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -342,10 +342,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -354,10 +354,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: DROP TABLE DECIMAL_3
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_3
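
Note on the decimal_3.q.out churn above: it is purely presentational. With HIVE-12063, decimal output is padded with trailing zeros out to the declared scale of the column (decimal(38,18) here), so 3.14 now prints as 3.140000000000000000 while the underlying values, row counts, and ordering are unchanged. A minimal Java sketch of the padding rule, using java.math.BigDecimal as a stand-in for Hive's HiveDecimal (the real formatting lives in Hive's decimal writables, not in this snippet):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class PadToScale {
        // Render a decimal padded with trailing zeros to the column scale,
        // mirroring the "+" lines above (e.g. 3.14 at scale 18).
        static String toFormatString(BigDecimal value, int columnScale) {
            // Raising the scale only appends zeros, so no rounding can occur.
            return value.setScale(columnScale, RoundingMode.UNNECESSARY).toPlainString();
        }

        public static void main(String[] args) {
            System.out.println(toFormatString(new BigDecimal("3.14"), 18));
            // -> 3.140000000000000000
            System.out.println(toFormatString(new BigDecimal("-1234567890.123456789"), 18));
            // -> -1234567890.123456789000000000
        }
    }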

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_4.q.out b/ql/src/test/results/clientpositive/decimal_4.q.out
index 50662af..8eb1de4 100644
--- a/ql/src/test/results/clientpositive/decimal_4.q.out
+++ b/ql/src/test/results/clientpositive/decimal_4.q.out
@@ -57,43 +57,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_1
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
+-1234567890.1234567890000000000000000	-1234567890
+-4400.0000000000000000000000000	4400
+-1255.4900000000000000000000000	-1255
+-1.1220000000000000000000000	-11
+-1.1200000000000000000000000	-1
+-1.1200000000000000000000000	-1
+-0.3330000000000000000000000	0
+-0.3300000000000000000000000	0
+-0.3000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0100000000000000000000000	0
+0.0200000000000000000000000	0
+0.1000000000000000000000000	0
+0.2000000000000000000000000	0
+0.3000000000000000000000000	0
+0.3300000000000000000000000	0
+0.3330000000000000000000000	0
 0.9999999999999999999999999	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+1.0000000000000000000000000	1
+1.0000000000000000000000000	1
+1.1200000000000000000000000	1
+1.1220000000000000000000000	1
+2.0000000000000000000000000	2
+2.0000000000000000000000000	2
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	4
+10.0000000000000000000000000	10
+20.0000000000000000000000000	20
+100.0000000000000000000000000	100
+124.0000000000000000000000000	124
+125.2000000000000000000000000	125
+200.0000000000000000000000000	200
+1234567890.1234567800000000000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -103,43 +103,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: DROP TABLE DECIMAL_4_1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_4_1

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_5.q.out b/ql/src/test/results/clientpositive/decimal_5.q.out
index 0f24b8a..0c46538 100644
--- a/ql/src/test/results/clientpositive/decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/decimal_5.q.out
@@ -35,41 +35,41 @@ POSTHOOK: Input: default@decimal_5
 NULL
 NULL
 NULL
--4400
--1255.49
--1.122
--1.12
--1.12
--0.333
--0.33
--0.3
-0
-0
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1
-1
-1.12
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.00000
+1.00000
+1.12000
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -79,32 +79,32 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.12000
+1.12200
+2.00000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -161,40 +161,40 @@ POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
+0.000
+0.000
+100.000
+10.000
+1.000
+0.100
+0.010
+200.000
+20.000
+2.000
+0.000
+0.200
+0.020
+0.300
+0.330
 0.333
--0.3
--0.33
+-0.300
+-0.330
 -0.333
-1
-2
-3.14
--1.12
--1.12
+1.000
+2.000
+3.140
+-1.120
+-1.120
 -1.122
-1.12
+1.120
 1.122
-124
-125.2
+124.000
+125.200
 NULL
-3.14
-3.14
-3.14
-1
+3.140
+3.140
+3.140
+1.000
 NULL
 NULL
 PREHOOK: query: DROP TABLE DECIMAL_5
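
decimal_5.q.out exercises the same padding after casts: key is stored at scale 5 (hence the .00000 padding in the first two blocks), and the final block casts to a scale-3 decimal, which is why 0.333 and -1.122 appear as unchanged context lines while 3.14 gains a trailing zero. A sketch of that scale-narrowing cast plus padding, again with BigDecimal standing in for HiveDecimal and HALF_UP assumed as the rounding mode (the authoritative behavior lives in Hive's cast UDFs):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class CastToScale3 {
        public static void main(String[] args) {
            // Scale-5 values cast down to scale 3, then printed padded.
            BigDecimal pi = new BigDecimal("3.14000");
            System.out.println(pi.setScale(3, RoundingMode.HALF_UP).toPlainString());
            // -> 3.140
            BigDecimal thirds = new BigDecimal("0.33300");
            System.out.println(thirds.setScale(3, RoundingMode.HALF_UP).toPlainString());
            // -> 0.333 (already exact at scale 3, so the diff leaves it alone)
        }
    }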

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_6.q.out b/ql/src/test/results/clientpositive/decimal_6.q.out
index 0344fa9..e1ce600 100644
--- a/ql/src/test/results/clientpositive/decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/decimal_6.q.out
@@ -78,54 +78,54 @@ NULL
 NULL
 NULL
 NULL
--1234567890.1235
--4400
--4400
--1255.49
--1255.49
--1.122
--1.122
--1.12
--1.12
--0.333
--0.333
--0.3
--0.3
-0
-0
-0
-0
-0.333
-0.333
-1
-1
-1
-1
-1.12
-1.12
-1.122
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-3.14
-3.14
-10
-10
-10.7343
+-1234567890.12350
+-4400.00000
+-4400.00000
+-1255.49000
+-1255.49000
+-1.12200
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33300
+-0.30000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.00000
+0.33300
+0.33300
+1.00000
+1.00000
+1.00000
+1.00000
+1.12000
+1.12000
+1.12200
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+10.00000
+10.73430
 10.73433
-124
-124
-125.2
-125.2
+124.00000
+124.00000
+125.20000
+125.20000
 23232.23435
-23232.2344
-2389432.2375
-2389432.2375
-1234567890.1235
+23232.23440
+2389432.23750
+2389432.23750
+1234567890.12350
 PREHOOK: query: CREATE TABLE DECIMAL_6_3 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@decimal_6_1
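
The CTAS here is also where derived types show up: k = key + 5.5 and v = value * 11 take their precision and scale from Hive's decimal arithmetic rules rather than from the source columns. Below is a sketch of the SQL-style rule for addition that Hive's documentation describes (result scale is max(s1, s2); precision grows by one carry digit and is capped at 38); this is an illustration of the rule, not Hive's type-inference code:

    public class DecimalAddType {
        // Result type of decimal(p1,s1) + decimal(p2,s2), capped at Hive's
        // 38-digit maximum.
        static int[] addResultType(int p1, int s1, int p2, int s2) {
            int scale = Math.max(s1, s2);
            int precision = Math.min(38, Math.max(p1 - s1, p2 - s2) + scale + 1);
            return new int[] { precision, scale };
        }

        public static void main(String[] args) {
            // e.g. a decimal(10,5) key plus the literal 5.5, i.e. decimal(2,1)
            int[] t = addResultType(10, 5, 2, 1);
            System.out.println("decimal(" + t[0] + "," + t[1] + ")"); // -> decimal(11,5)
        }
    }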

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_join2.q.out b/ql/src/test/results/clientpositive/decimal_join2.q.out
index 604f99b..a3ca231 100644
--- a/ql/src/test/results/clientpositive/decimal_join2.q.out
+++ b/ql/src/test/results/clientpositive/decimal_join2.q.out
@@ -132,71 +132,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: EXPLAIN
 SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value
 PREHOOK: type: QUERY
@@ -282,71 +282,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: DROP TABLE DECIMAL_3_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_3_txt
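
The join output in decimal_join2.q.out changes only in rendering, which is worth stressing: the join keys (like the earlier WHERE key=3.140 probes) compare decimals by numeric value, so trailing zeros never change which rows match or how many duplicates the self-join produces. In plain Java terms this is the usual BigDecimal distinction between equals, which is scale-sensitive, and compareTo, which compares value only; any HiveDecimal-style key comparison has to behave like the latter:

    import java.math.BigDecimal;

    public class DecimalKeyEquality {
        public static void main(String[] args) {
            BigDecimal a = new BigDecimal("3.14");
            BigDecimal b = new BigDecimal("3.140");
            System.out.println(a.equals(b));         // -> false (scale differs)
            System.out.println(a.compareTo(b) == 0); // -> true  (same numeric value)
        }
    }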

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_precision.q.out b/ql/src/test/results/clientpositive/decimal_precision.q.out
index 69a6045..5542b40 100644
--- a/ql/src/test/results/clientpositive/decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/decimal_precision.q.out
@@ -76,13 +76,13 @@ NULL
 NULL
 NULL
 NULL
-0
-0
-0
-0
-0
-0.123456789
-0.123456789
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1234567890
+0.1234567890
 1.2345678901
 1.2345678901
 1.2345678901
@@ -99,14 +99,14 @@ NULL
 12345.6789012346
 123456.7890123456
 123456.7890123457
-1234567.890123456
+1234567.8901234560
 1234567.8901234568
-12345678.90123456
+12345678.9012345600
 12345678.9012345679
-123456789.0123456
+123456789.0123456000
 123456789.0123456789
-1234567890.123456
-1234567890.123456789
+1234567890.1234560000
+1234567890.1234567890
 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -159,13 +159,13 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0.123456789	1.123456789	-0.876543211
-0.123456789	1.123456789	-0.876543211
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.1234567890	1.1234567890	-0.8765432110
+0.1234567890	1.1234567890	-0.8765432110
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
@@ -182,14 +182,14 @@ NULL	NULL	NULL
 12345.6789012346	12346.6789012346	12344.6789012346
 123456.7890123456	123457.7890123456	123455.7890123456
 123456.7890123457	123457.7890123457	123455.7890123457
-1234567.890123456	1234568.890123456	1234566.890123456
+1234567.8901234560	1234568.8901234560	1234566.8901234560
 1234567.8901234568	1234568.8901234568	1234566.8901234568
-12345678.90123456	12345679.90123456	12345677.90123456
+12345678.9012345600	12345679.9012345600	12345677.9012345600
 12345678.9012345679	12345679.9012345679	12345677.9012345679
-123456789.0123456	123456790.0123456	123456788.0123456
+123456789.0123456000	123456790.0123456000	123456788.0123456000
 123456789.0123456789	123456790.0123456789	123456788.0123456789
-1234567890.123456	1234567891.123456	1234567889.123456
-1234567890.123456789	1234567891.123456789	1234567889.123456789
+1234567890.1234560000	1234567891.1234560000	1234567889.1234560000
+1234567890.1234567890	1234567891.1234567890	1234567889.1234567890
 PREHOOK: query: SELECT dec, dec * 2, dec / 3  FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -242,37 +242,37 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0.123456789	0.246913578	0.041152263
-0.123456789	0.246913578	0.041152263
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.1234567890	0.2469135780	0.041152263000
+0.1234567890	0.2469135780	0.041152263000
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
 12345.6789012346	24691.3578024692	4115.226300411533
 12345.6789012346	24691.3578024692	4115.226300411533
-123456.7890123456	246913.5780246912	41152.2630041152
+123456.7890123456	246913.5780246912	41152.263004115200
 123456.7890123457	246913.5780246914	41152.263004115233
-1234567.890123456	2469135.780246912	411522.630041152
+1234567.8901234560	2469135.7802469120	411522.630041152000
 1234567.8901234568	2469135.7802469136	411522.630041152267
-12345678.90123456	24691357.80246912	4115226.30041152
+12345678.9012345600	24691357.8024691200	4115226.300411520000
 12345678.9012345679	24691357.8024691358	4115226.300411522633
-123456789.0123456	246913578.0246912	41152263.0041152
-123456789.0123456789	246913578.0246913578	41152263.0041152263
-1234567890.123456	2469135780.246912	411522630.041152
-1234567890.123456789	2469135780.246913578	411522630.041152263
+123456789.0123456000	246913578.0246912000	41152263.004115200000
+123456789.0123456789	246913578.0246913578	41152263.004115226300
+1234567890.1234560000	2469135780.2469120000	411522630.041152000000
+1234567890.1234567890	2469135780.2469135780	411522630.041152263000
 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -325,13 +325,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.013717421
-0.123456789	0.013717421
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.1234567890	0.013717421000
+0.1234567890	0.013717421000
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
@@ -348,14 +348,14 @@ NULL	NULL
 12345.6789012346	1371.742100137178
 123456.7890123456	13717.421001371733
 123456.7890123457	13717.421001371744
-1234567.890123456	137174.210013717333
+1234567.8901234560	137174.210013717333
 1234567.8901234568	137174.210013717422
-12345678.90123456	1371742.100137173333
+12345678.9012345600	1371742.100137173333
 12345678.9012345679	1371742.100137174211
-123456789.0123456	13717421.001371733333
-123456789.0123456789	13717421.0013717421
-1234567890.123456	137174210.013717333333
-1234567890.123456789	137174210.013717421
+123456789.0123456000	13717421.001371733333
+123456789.0123456789	13717421.001371742100
+1234567890.1234560000	137174210.013717333333
+1234567890.1234567890	137174210.013717421000
 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -408,13 +408,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.0045724736667
-0.123456789	0.0045724736667
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.1234567890	0.0045724736667
+0.1234567890	0.0045724736667
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
@@ -431,14 +431,14 @@ NULL	NULL
 12345.6789012346	457.2473667123926
 123456.7890123456	4572.4736671239111
 123456.7890123457	4572.4736671239148
-1234567.890123456	45724.7366712391111
+1234567.8901234560	45724.7366712391111
 1234567.8901234568	45724.7366712391407
-12345678.90123456	457247.3667123911111
+12345678.9012345600	457247.3667123911111
 12345678.9012345679	457247.3667123914037
-123456789.0123456	4572473.6671239111111
+123456789.0123456000	4572473.6671239111111
 123456789.0123456789	4572473.6671239140333
-1234567890.123456	45724736.6712391111111
-1234567890.123456789	45724736.6712391403333
+1234567890.1234560000	45724736.6712391111111
+1234567890.1234567890	45724736.6712391403333
 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -491,13 +491,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.015241578750190521
-0.123456789	0.015241578750190521
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.1234567890	0.01524157875019052100
+0.1234567890	0.01524157875019052100
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
@@ -514,14 +514,14 @@ NULL	NULL
 12345.6789012346	152415787.53238916034140423716
 123456.7890123456	15241578753.23881726870921383936
 123456.7890123457	15241578753.23884196006701630849
-1234567.890123456	1524157875323.881726870921383936
+1234567.8901234560	1524157875323.88172687092138393600
 1234567.8901234568	1524157875323.88370217954558146624
-12345678.90123456	152415787532388.1726870921383936
+12345678.9012345600	152415787532388.17268709213839360000
 12345678.9012345679	152415787532388.36774881877789971041
-123456789.0123456	15241578753238817.26870921383936
+123456789.0123456000	15241578753238817.26870921383936000000
 123456789.0123456789	15241578753238836.75019051998750190521
-1234567890.123456	NULL
-1234567890.123456789	NULL
+1234567890.1234560000	NULL
+1234567890.1234567890	NULL
 PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
@@ -613,7 +613,7 @@ POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_precision
 #### A masked pattern was here ####
-12345678901234567890.12345678
+12345678901234567890.123456780000000000
 PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
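
One behavioral detail survives the reformatting in decimal_precision.q.out: the last two dec * dec rows are NULL both before and after the patch, because squaring a 20-digit value needs 39 significant digits and Hive's decimal type tops out at 38, so the multiplication overflows to NULL rather than silently truncating. A sketch of that overflow check, assuming the 38-digit cap and the null-on-overflow behavior the output above exhibits:

    import java.math.BigDecimal;

    public class DecimalOverflow {
        static final int MAX_PRECISION = 38; // Hive's decimal cap

        // Multiply, returning null when the exact result cannot be
        // represented in MAX_PRECISION digits; a sketch, not Hive's code.
        static BigDecimal multiplyOrNull(BigDecimal x, BigDecimal y) {
            BigDecimal r = x.multiply(y);
            return r.precision() > MAX_PRECISION ? null : r;
        }

        public static void main(String[] args) {
            BigDecimal dec = new BigDecimal("1234567890.1234567890");
            System.out.println(multiplyOrNull(dec, dec)); // -> null (needs 39 digits)
        }
    }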

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/decimal_trailing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_trailing.q.out b/ql/src/test/results/clientpositive/decimal_trailing.q.out
index 6cfe282..1b70737 100644
--- a/ql/src/test/results/clientpositive/decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/decimal_trailing.q.out
@@ -40,16 +40,16 @@ POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_trailing
 #### A masked pattern was here ####
-0	0	0
-1	0	0
+0	0.0000	0.00000000
+1	0.0000	0.00000000
 2	NULL	NULL
-3	1	1
-4	10	10
-5	100	100
-6	1000	1000
-7	10000	10000
-8	100000	100000
-9	NULL	1000000
+3	1.0000	1.00000000
+4	10.0000	10.00000000
+5	100.0000	100.00000000
+6	1000.0000	1000.00000000
+7	10000.0000	10000.00000000
+8	100000.0000	100000.00000000
+9	NULL	1000000.00000000
 10	NULL	NULL
 11	NULL	NULL
 12	NULL	NULL
@@ -58,18 +58,18 @@ POSTHOOK: Input: default@decimal_trailing
 15	NULL	NULL
 16	NULL	NULL
 17	NULL	NULL
-18	1	1
-19	10	10
-20	100	100
-21	1000	1000
-22	100000	10000
-23	0	0
-24	0	0
-25	0	0
-26	0	0
-27	0	0
-28	12313.2	134134.312525
-29	99999.999	134134.31242553
+18	1.0000	1.00000000
+19	10.0000	10.00000000
+20	100.0000	100.00000000
+21	1000.0000	1000.00000000
+22	100000.0000	10000.00000000
+23	0.0000	0.00000000
+24	0.0000	0.00000000
+25	0.0000	0.00000000
+26	0.0000	0.00000000
+27	0.0000	0.00000000
+28	12313.2000	134134.31252500
+29	99999.9990	134134.31242553
 PREHOOK: query: DROP TABLE DECIMAL_TRAILING
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_trailing


[45/55] [abbrv] hive git commit: HIVE-7575 GetTables thrift call is very slow (Navis via Aihua Xu, reviewed by Szehon Ho, Aihua Xu)

Posted by xu...@apache.org.
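
Despite its size, the generated-code portion of this commit is mechanical: adding the new metastore calls for HIVE-7575 to the Thrift IDL shifts the Thrift compiler's temporary-variable counter, so every subsequent _sizeNNN/_etypeNNN/_iterNNN in ThriftHiveMetastore.cpp is renamed (by +2 here) while the read/write loops themselves are untouched. For reference, the loop shape being shuffled around is the standard Thrift list deserializer; the same pattern in Java against libthrift looks like this (identifiers are illustrative, not copied from the generated ThriftHiveMetastore):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;

    public final class ReadStringList {
        // Read a list<string> field: size header, one readString per element,
        // then the end marker, exactly what the C++ loops above do.
        static List<String> read(TProtocol iprot) throws TException {
            TList tlist = iprot.readListBegin();
            List<String> success = new ArrayList<>(tlist.size);
            for (int i = 0; i < tlist.size; ++i) {
                success.add(iprot.readString());
            }
            iprot.readListEnd();
            return success;
        }
    }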
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 6a80db7..0443f80 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size714;
-            ::apache::thrift::protocol::TType _etype717;
-            xfer += iprot->readListBegin(_etype717, _size714);
-            this->success.resize(_size714);
-            uint32_t _i718;
-            for (_i718 = 0; _i718 < _size714; ++_i718)
+            uint32_t _size716;
+            ::apache::thrift::protocol::TType _etype719;
+            xfer += iprot->readListBegin(_etype719, _size716);
+            this->success.resize(_size716);
+            uint32_t _i720;
+            for (_i720 = 0; _i720 < _size716; ++_i720)
             {
-              xfer += iprot->readString(this->success[_i718]);
+              xfer += iprot->readString(this->success[_i720]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter719;
-      for (_iter719 = this->success.begin(); _iter719 != this->success.end(); ++_iter719)
+      std::vector<std::string> ::const_iterator _iter721;
+      for (_iter721 = this->success.begin(); _iter721 != this->success.end(); ++_iter721)
       {
-        xfer += oprot->writeString((*_iter719));
+        xfer += oprot->writeString((*_iter721));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size720;
-            ::apache::thrift::protocol::TType _etype723;
-            xfer += iprot->readListBegin(_etype723, _size720);
-            (*(this->success)).resize(_size720);
-            uint32_t _i724;
-            for (_i724 = 0; _i724 < _size720; ++_i724)
+            uint32_t _size722;
+            ::apache::thrift::protocol::TType _etype725;
+            xfer += iprot->readListBegin(_etype725, _size722);
+            (*(this->success)).resize(_size722);
+            uint32_t _i726;
+            for (_i726 = 0; _i726 < _size722; ++_i726)
             {
-              xfer += iprot->readString((*(this->success))[_i724]);
+              xfer += iprot->readString((*(this->success))[_i726]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size725;
-            ::apache::thrift::protocol::TType _etype728;
-            xfer += iprot->readListBegin(_etype728, _size725);
-            this->success.resize(_size725);
-            uint32_t _i729;
-            for (_i729 = 0; _i729 < _size725; ++_i729)
+            uint32_t _size727;
+            ::apache::thrift::protocol::TType _etype730;
+            xfer += iprot->readListBegin(_etype730, _size727);
+            this->success.resize(_size727);
+            uint32_t _i731;
+            for (_i731 = 0; _i731 < _size727; ++_i731)
             {
-              xfer += iprot->readString(this->success[_i729]);
+              xfer += iprot->readString(this->success[_i731]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter730;
-      for (_iter730 = this->success.begin(); _iter730 != this->success.end(); ++_iter730)
+      std::vector<std::string> ::const_iterator _iter732;
+      for (_iter732 = this->success.begin(); _iter732 != this->success.end(); ++_iter732)
       {
-        xfer += oprot->writeString((*_iter730));
+        xfer += oprot->writeString((*_iter732));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size731;
-            ::apache::thrift::protocol::TType _etype734;
-            xfer += iprot->readListBegin(_etype734, _size731);
-            (*(this->success)).resize(_size731);
-            uint32_t _i735;
-            for (_i735 = 0; _i735 < _size731; ++_i735)
+            uint32_t _size733;
+            ::apache::thrift::protocol::TType _etype736;
+            xfer += iprot->readListBegin(_etype736, _size733);
+            (*(this->success)).resize(_size733);
+            uint32_t _i737;
+            for (_i737 = 0; _i737 < _size733; ++_i737)
             {
-              xfer += iprot->readString((*(this->success))[_i735]);
+              xfer += iprot->readString((*(this->success))[_i737]);
             }
             xfer += iprot->readListEnd();
           }
@@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->success.clear();
-            uint32_t _size736;
-            ::apache::thrift::protocol::TType _ktype737;
-            ::apache::thrift::protocol::TType _vtype738;
-            xfer += iprot->readMapBegin(_ktype737, _vtype738, _size736);
-            uint32_t _i740;
-            for (_i740 = 0; _i740 < _size736; ++_i740)
+            uint32_t _size738;
+            ::apache::thrift::protocol::TType _ktype739;
+            ::apache::thrift::protocol::TType _vtype740;
+            xfer += iprot->readMapBegin(_ktype739, _vtype740, _size738);
+            uint32_t _i742;
+            for (_i742 = 0; _i742 < _size738; ++_i742)
             {
-              std::string _key741;
-              xfer += iprot->readString(_key741);
-              Type& _val742 = this->success[_key741];
-              xfer += _val742.read(iprot);
+              std::string _key743;
+              xfer += iprot->readString(_key743);
+              Type& _val744 = this->success[_key743];
+              xfer += _val744.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::map<std::string, Type> ::const_iterator _iter743;
-      for (_iter743 = this->success.begin(); _iter743 != this->success.end(); ++_iter743)
+      std::map<std::string, Type> ::const_iterator _iter745;
+      for (_iter745 = this->success.begin(); _iter745 != this->success.end(); ++_iter745)
       {
-        xfer += oprot->writeString(_iter743->first);
-        xfer += _iter743->second.write(oprot);
+        xfer += oprot->writeString(_iter745->first);
+        xfer += _iter745->second.write(oprot);
       }
       xfer += oprot->writeMapEnd();
     }
@@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             (*(this->success)).clear();
-            uint32_t _size744;
-            ::apache::thrift::protocol::TType _ktype745;
-            ::apache::thrift::protocol::TType _vtype746;
-            xfer += iprot->readMapBegin(_ktype745, _vtype746, _size744);
-            uint32_t _i748;
-            for (_i748 = 0; _i748 < _size744; ++_i748)
+            uint32_t _size746;
+            ::apache::thrift::protocol::TType _ktype747;
+            ::apache::thrift::protocol::TType _vtype748;
+            xfer += iprot->readMapBegin(_ktype747, _vtype748, _size746);
+            uint32_t _i750;
+            for (_i750 = 0; _i750 < _size746; ++_i750)
             {
-              std::string _key749;
-              xfer += iprot->readString(_key749);
-              Type& _val750 = (*(this->success))[_key749];
-              xfer += _val750.read(iprot);
+              std::string _key751;
+              xfer += iprot->readString(_key751);
+              Type& _val752 = (*(this->success))[_key751];
+              xfer += _val752.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size751;
-            ::apache::thrift::protocol::TType _etype754;
-            xfer += iprot->readListBegin(_etype754, _size751);
-            this->success.resize(_size751);
-            uint32_t _i755;
-            for (_i755 = 0; _i755 < _size751; ++_i755)
+            uint32_t _size753;
+            ::apache::thrift::protocol::TType _etype756;
+            xfer += iprot->readListBegin(_etype756, _size753);
+            this->success.resize(_size753);
+            uint32_t _i757;
+            for (_i757 = 0; _i757 < _size753; ++_i757)
             {
-              xfer += this->success[_i755].read(iprot);
+              xfer += this->success[_i757].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter756;
-      for (_iter756 = this->success.begin(); _iter756 != this->success.end(); ++_iter756)
+      std::vector<FieldSchema> ::const_iterator _iter758;
+      for (_iter758 = this->success.begin(); _iter758 != this->success.end(); ++_iter758)
       {
-        xfer += (*_iter756).write(oprot);
+        xfer += (*_iter758).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size757;
-            ::apache::thrift::protocol::TType _etype760;
-            xfer += iprot->readListBegin(_etype760, _size757);
-            (*(this->success)).resize(_size757);
-            uint32_t _i761;
-            for (_i761 = 0; _i761 < _size757; ++_i761)
+            uint32_t _size759;
+            ::apache::thrift::protocol::TType _etype762;
+            xfer += iprot->readListBegin(_etype762, _size759);
+            (*(this->success)).resize(_size759);
+            uint32_t _i763;
+            for (_i763 = 0; _i763 < _size759; ++_i763)
             {
-              xfer += (*(this->success))[_i761].read(iprot);
+              xfer += (*(this->success))[_i763].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size762;
-            ::apache::thrift::protocol::TType _etype765;
-            xfer += iprot->readListBegin(_etype765, _size762);
-            this->success.resize(_size762);
-            uint32_t _i766;
-            for (_i766 = 0; _i766 < _size762; ++_i766)
+            uint32_t _size764;
+            ::apache::thrift::protocol::TType _etype767;
+            xfer += iprot->readListBegin(_etype767, _size764);
+            this->success.resize(_size764);
+            uint32_t _i768;
+            for (_i768 = 0; _i768 < _size764; ++_i768)
             {
-              xfer += this->success[_i766].read(iprot);
+              xfer += this->success[_i768].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter767;
-      for (_iter767 = this->success.begin(); _iter767 != this->success.end(); ++_iter767)
+      std::vector<FieldSchema> ::const_iterator _iter769;
+      for (_iter769 = this->success.begin(); _iter769 != this->success.end(); ++_iter769)
       {
-        xfer += (*_iter767).write(oprot);
+        xfer += (*_iter769).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size768;
-            ::apache::thrift::protocol::TType _etype771;
-            xfer += iprot->readListBegin(_etype771, _size768);
-            (*(this->success)).resize(_size768);
-            uint32_t _i772;
-            for (_i772 = 0; _i772 < _size768; ++_i772)
+            uint32_t _size770;
+            ::apache::thrift::protocol::TType _etype773;
+            xfer += iprot->readListBegin(_etype773, _size770);
+            (*(this->success)).resize(_size770);
+            uint32_t _i774;
+            for (_i774 = 0; _i774 < _size770; ++_i774)
             {
-              xfer += (*(this->success))[_i772].read(iprot);
+              xfer += (*(this->success))[_i774].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size773;
-            ::apache::thrift::protocol::TType _etype776;
-            xfer += iprot->readListBegin(_etype776, _size773);
-            this->success.resize(_size773);
-            uint32_t _i777;
-            for (_i777 = 0; _i777 < _size773; ++_i777)
+            uint32_t _size775;
+            ::apache::thrift::protocol::TType _etype778;
+            xfer += iprot->readListBegin(_etype778, _size775);
+            this->success.resize(_size775);
+            uint32_t _i779;
+            for (_i779 = 0; _i779 < _size775; ++_i779)
             {
-              xfer += this->success[_i777].read(iprot);
+              xfer += this->success[_i779].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter778;
-      for (_iter778 = this->success.begin(); _iter778 != this->success.end(); ++_iter778)
+      std::vector<FieldSchema> ::const_iterator _iter780;
+      for (_iter780 = this->success.begin(); _iter780 != this->success.end(); ++_iter780)
       {
-        xfer += (*_iter778).write(oprot);
+        xfer += (*_iter780).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size779;
-            ::apache::thrift::protocol::TType _etype782;
-            xfer += iprot->readListBegin(_etype782, _size779);
-            (*(this->success)).resize(_size779);
-            uint32_t _i783;
-            for (_i783 = 0; _i783 < _size779; ++_i783)
+            uint32_t _size781;
+            ::apache::thrift::protocol::TType _etype784;
+            xfer += iprot->readListBegin(_etype784, _size781);
+            (*(this->success)).resize(_size781);
+            uint32_t _i785;
+            for (_i785 = 0; _i785 < _size781; ++_i785)
             {
-              xfer += (*(this->success))[_i783].read(iprot);
+              xfer += (*(this->success))[_i785].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
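
Each call gets two generated result readers: the _result structs own
their success field by value and are what the server side fills in, while
the _presult structs (note the (*(this->success)) indirection above) hold
a pointer so the client can deserialize straight into the caller's
out-parameter without an extra copy. A minimal sketch of the distinction,
with illustrative member declarations (the real ones live in the
generated header, outside this excerpt):

    // server side: owns the payload
    class ThriftHiveMetastore_get_schema_result {
      std::vector<FieldSchema> success;
      // read()/write() operate on this->success directly
    };

    // client side: borrows the caller's vector
    class ThriftHiveMetastore_get_schema_presult {
      std::vector<FieldSchema>* success;
      // read() fills (*(this->success)) in place
    };
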
@@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size784;
-            ::apache::thrift::protocol::TType _etype787;
-            xfer += iprot->readListBegin(_etype787, _size784);
-            this->success.resize(_size784);
-            uint32_t _i788;
-            for (_i788 = 0; _i788 < _size784; ++_i788)
+            uint32_t _size786;
+            ::apache::thrift::protocol::TType _etype789;
+            xfer += iprot->readListBegin(_etype789, _size786);
+            this->success.resize(_size786);
+            uint32_t _i790;
+            for (_i790 = 0; _i790 < _size786; ++_i790)
             {
-              xfer += this->success[_i788].read(iprot);
+              xfer += this->success[_i790].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter789;
-      for (_iter789 = this->success.begin(); _iter789 != this->success.end(); ++_iter789)
+      std::vector<FieldSchema> ::const_iterator _iter791;
+      for (_iter791 = this->success.begin(); _iter791 != this->success.end(); ++_iter791)
       {
-        xfer += (*_iter789).write(oprot);
+        xfer += (*_iter791).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size790;
-            ::apache::thrift::protocol::TType _etype793;
-            xfer += iprot->readListBegin(_etype793, _size790);
-            (*(this->success)).resize(_size790);
-            uint32_t _i794;
-            for (_i794 = 0; _i794 < _size790; ++_i794)
+            uint32_t _size792;
+            ::apache::thrift::protocol::TType _etype795;
+            xfer += iprot->readListBegin(_etype795, _size792);
+            (*(this->success)).resize(_size792);
+            uint32_t _i796;
+            for (_i796 = 0; _i796 < _size792; ++_i796)
             {
-              xfer += (*(this->success))[_i794].read(iprot);
+              xfer += (*(this->success))[_i796].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5099,14 +5099,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size795;
-            ::apache::thrift::protocol::TType _etype798;
-            xfer += iprot->readListBegin(_etype798, _size795);
-            this->success.resize(_size795);
-            uint32_t _i799;
-            for (_i799 = 0; _i799 < _size795; ++_i799)
+            uint32_t _size797;
+            ::apache::thrift::protocol::TType _etype800;
+            xfer += iprot->readListBegin(_etype800, _size797);
+            this->success.resize(_size797);
+            uint32_t _i801;
+            for (_i801 = 0; _i801 < _size797; ++_i801)
             {
-              xfer += iprot->readString(this->success[_i799]);
+              xfer += iprot->readString(this->success[_i801]);
             }
             xfer += iprot->readListEnd();
           }
@@ -5145,10 +5145,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter800;
-      for (_iter800 = this->success.begin(); _iter800 != this->success.end(); ++_iter800)
+      std::vector<std::string> ::const_iterator _iter802;
+      for (_iter802 = this->success.begin(); _iter802 != this->success.end(); ++_iter802)
       {
-        xfer += oprot->writeString((*_iter800));
+        xfer += oprot->writeString((*_iter802));
       }
       xfer += oprot->writeListEnd();
     }
@@ -5193,14 +5193,313 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size801;
-            ::apache::thrift::protocol::TType _etype804;
-            xfer += iprot->readListBegin(_etype804, _size801);
-            (*(this->success)).resize(_size801);
-            uint32_t _i805;
-            for (_i805 = 0; _i805 < _size801; ++_i805)
+            uint32_t _size803;
+            ::apache::thrift::protocol::TType _etype806;
+            xfer += iprot->readListBegin(_etype806, _size803);
+            (*(this->success)).resize(_size803);
+            uint32_t _i807;
+            for (_i807 = 0; _i807 < _size803; ++_i807)
             {
-              xfer += iprot->readString((*(this->success))[_i805]);
+              xfer += iprot->readString((*(this->success))[_i807]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_table_meta_args::~ThriftHiveMetastore_get_table_meta_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->db_patterns);
+          this->__isset.db_patterns = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->tbl_patterns);
+          this->__isset.tbl_patterns = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->tbl_types.clear();
+            uint32_t _size808;
+            ::apache::thrift::protocol::TType _etype811;
+            xfer += iprot->readListBegin(_etype811, _size808);
+            this->tbl_types.resize(_size808);
+            uint32_t _i812;
+            for (_i812 = 0; _i812 < _size808; ++_i812)
+            {
+              xfer += iprot->readString(this->tbl_types[_i812]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.tbl_types = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_args");
+
+  xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->db_patterns);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->tbl_patterns);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
+    std::vector<std::string> ::const_iterator _iter813;
+    for (_iter813 = this->tbl_types.begin(); _iter813 != this->tbl_types.end(); ++_iter813)
+    {
+      xfer += oprot->writeString((*_iter813));
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_table_meta_pargs::~ThriftHiveMetastore_get_table_meta_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_pargs");
+
+  xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->db_patterns)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->tbl_patterns)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
+    std::vector<std::string> ::const_iterator _iter814;
+    for (_iter814 = (*(this->tbl_types)).begin(); _iter814 != (*(this->tbl_types)).end(); ++_iter814)
+    {
+      xfer += oprot->writeString((*_iter814));
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_table_meta_result::~ThriftHiveMetastore_get_table_meta_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->success.clear();
+            uint32_t _size815;
+            ::apache::thrift::protocol::TType _etype818;
+            xfer += iprot->readListBegin(_etype818, _size815);
+            this->success.resize(_size815);
+            uint32_t _i819;
+            for (_i819 = 0; _i819 < _size815; ++_i819)
+            {
+              xfer += this->success[_i819].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+      std::vector<TableMeta> ::const_iterator _iter820;
+      for (_iter820 = this->success.begin(); _iter820 != this->success.end(); ++_iter820)
+      {
+        xfer += (*_iter820).write(oprot);
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_table_meta_presult::~ThriftHiveMetastore_get_table_meta_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            (*(this->success)).clear();
+            uint32_t _size821;
+            ::apache::thrift::protocol::TType _etype824;
+            xfer += iprot->readListBegin(_etype824, _size821);
+            (*(this->success)).resize(_size821);
+            uint32_t _i825;
+            for (_i825 = 0; _i825 < _size821; ++_i825)
+            {
+              xfer += (*(this->success))[_i825].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
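
The block above is the one genuinely new piece in this stretch of the
file: the generated argument, result, and presult structs for
get_table_meta. From the field ids they read and write, the call takes
two pattern strings (db_patterns and tbl_patterns, fields 1 and 2) plus a
list of table type names (tbl_types, field 3) and returns a list of
TableMeta structs in the success slot, with a single struct exception o1.
The matching client stub is outside this excerpt, but by the usual Thrift
C++ convention it would be invoked roughly like this (the client object,
protocol, and filter values are illustrative only):

    std::vector<TableMeta> metas;
    std::vector<std::string> types;   // type filter; exact semantics of an
                                      // empty list are not shown here
    ThriftHiveMetastoreClient client(protocol);
    client.get_table_meta(metas, "default", "*", types);  // out-param first
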
@@ -5338,14 +5637,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size806;
-            ::apache::thrift::protocol::TType _etype809;
-            xfer += iprot->readListBegin(_etype809, _size806);
-            this->success.resize(_size806);
-            uint32_t _i810;
-            for (_i810 = 0; _i810 < _size806; ++_i810)
+            uint32_t _size826;
+            ::apache::thrift::protocol::TType _etype829;
+            xfer += iprot->readListBegin(_etype829, _size826);
+            this->success.resize(_size826);
+            uint32_t _i830;
+            for (_i830 = 0; _i830 < _size826; ++_i830)
             {
-              xfer += iprot->readString(this->success[_i810]);
+              xfer += iprot->readString(this->success[_i830]);
             }
             xfer += iprot->readListEnd();
           }
@@ -5384,10 +5683,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter811;
-      for (_iter811 = this->success.begin(); _iter811 != this->success.end(); ++_iter811)
+      std::vector<std::string> ::const_iterator _iter831;
+      for (_iter831 = this->success.begin(); _iter831 != this->success.end(); ++_iter831)
       {
-        xfer += oprot->writeString((*_iter811));
+        xfer += oprot->writeString((*_iter831));
       }
       xfer += oprot->writeListEnd();
     }
@@ -5432,14 +5731,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size812;
-            ::apache::thrift::protocol::TType _etype815;
-            xfer += iprot->readListBegin(_etype815, _size812);
-            (*(this->success)).resize(_size812);
-            uint32_t _i816;
-            for (_i816 = 0; _i816 < _size812; ++_i816)
+            uint32_t _size832;
+            ::apache::thrift::protocol::TType _etype835;
+            xfer += iprot->readListBegin(_etype835, _size832);
+            (*(this->success)).resize(_size832);
+            uint32_t _i836;
+            for (_i836 = 0; _i836 < _size832; ++_i836)
             {
-              xfer += iprot->readString((*(this->success))[_i816]);
+              xfer += iprot->readString((*(this->success))[_i836]);
             }
             xfer += iprot->readListEnd();
           }
@@ -5749,14 +6048,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_names.clear();
-            uint32_t _size817;
-            ::apache::thrift::protocol::TType _etype820;
-            xfer += iprot->readListBegin(_etype820, _size817);
-            this->tbl_names.resize(_size817);
-            uint32_t _i821;
-            for (_i821 = 0; _i821 < _size817; ++_i821)
+            uint32_t _size837;
+            ::apache::thrift::protocol::TType _etype840;
+            xfer += iprot->readListBegin(_etype840, _size837);
+            this->tbl_names.resize(_size837);
+            uint32_t _i841;
+            for (_i841 = 0; _i841 < _size837; ++_i841)
             {
-              xfer += iprot->readString(this->tbl_names[_i821]);
+              xfer += iprot->readString(this->tbl_names[_i841]);
             }
             xfer += iprot->readListEnd();
           }
@@ -5789,10 +6088,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-    std::vector<std::string> ::const_iterator _iter822;
-    for (_iter822 = this->tbl_names.begin(); _iter822 != this->tbl_names.end(); ++_iter822)
+    std::vector<std::string> ::const_iterator _iter842;
+    for (_iter842 = this->tbl_names.begin(); _iter842 != this->tbl_names.end(); ++_iter842)
     {
-      xfer += oprot->writeString((*_iter822));
+      xfer += oprot->writeString((*_iter842));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5820,10 +6119,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-    std::vector<std::string> ::const_iterator _iter823;
-    for (_iter823 = (*(this->tbl_names)).begin(); _iter823 != (*(this->tbl_names)).end(); ++_iter823)
+    std::vector<std::string> ::const_iterator _iter843;
+    for (_iter843 = (*(this->tbl_names)).begin(); _iter843 != (*(this->tbl_names)).end(); ++_iter843)
     {
-      xfer += oprot->writeString((*_iter823));
+      xfer += oprot->writeString((*_iter843));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5864,14 +6163,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size824;
-            ::apache::thrift::protocol::TType _etype827;
-            xfer += iprot->readListBegin(_etype827, _size824);
-            this->success.resize(_size824);
-            uint32_t _i828;
-            for (_i828 = 0; _i828 < _size824; ++_i828)
+            uint32_t _size844;
+            ::apache::thrift::protocol::TType _etype847;
+            xfer += iprot->readListBegin(_etype847, _size844);
+            this->success.resize(_size844);
+            uint32_t _i848;
+            for (_i848 = 0; _i848 < _size844; ++_i848)
             {
-              xfer += this->success[_i828].read(iprot);
+              xfer += this->success[_i848].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5926,10 +6225,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Table> ::const_iterator _iter829;
-      for (_iter829 = this->success.begin(); _iter829 != this->success.end(); ++_iter829)
+      std::vector<Table> ::const_iterator _iter849;
+      for (_iter849 = this->success.begin(); _iter849 != this->success.end(); ++_iter849)
       {
-        xfer += (*_iter829).write(oprot);
+        xfer += (*_iter849).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -5982,14 +6281,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size830;
-            ::apache::thrift::protocol::TType _etype833;
-            xfer += iprot->readListBegin(_etype833, _size830);
-            (*(this->success)).resize(_size830);
-            uint32_t _i834;
-            for (_i834 = 0; _i834 < _size830; ++_i834)
+            uint32_t _size850;
+            ::apache::thrift::protocol::TType _etype853;
+            xfer += iprot->readListBegin(_etype853, _size850);
+            (*(this->success)).resize(_size850);
+            uint32_t _i854;
+            for (_i854 = 0; _i854 < _size850; ++_i854)
             {
-              xfer += (*(this->success))[_i834].read(iprot);
+              xfer += (*(this->success))[_i854].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -6175,14 +6474,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size835;
-            ::apache::thrift::protocol::TType _etype838;
-            xfer += iprot->readListBegin(_etype838, _size835);
-            this->success.resize(_size835);
-            uint32_t _i839;
-            for (_i839 = 0; _i839 < _size835; ++_i839)
+            uint32_t _size855;
+            ::apache::thrift::protocol::TType _etype858;
+            xfer += iprot->readListBegin(_etype858, _size855);
+            this->success.resize(_size855);
+            uint32_t _i859;
+            for (_i859 = 0; _i859 < _size855; ++_i859)
             {
-              xfer += iprot->readString(this->success[_i839]);
+              xfer += iprot->readString(this->success[_i859]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6237,10 +6536,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter840;
-      for (_iter840 = this->success.begin(); _iter840 != this->success.end(); ++_iter840)
+      std::vector<std::string> ::const_iterator _iter860;
+      for (_iter860 = this->success.begin(); _iter860 != this->success.end(); ++_iter860)
       {
-        xfer += oprot->writeString((*_iter840));
+        xfer += oprot->writeString((*_iter860));
       }
       xfer += oprot->writeListEnd();
     }
@@ -6293,14 +6592,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size841;
-            ::apache::thrift::protocol::TType _etype844;
-            xfer += iprot->readListBegin(_etype844, _size841);
-            (*(this->success)).resize(_size841);
-            uint32_t _i845;
-            for (_i845 = 0; _i845 < _size841; ++_i845)
+            uint32_t _size861;
+            ::apache::thrift::protocol::TType _etype864;
+            xfer += iprot->readListBegin(_etype864, _size861);
+            (*(this->success)).resize(_size861);
+            uint32_t _i865;
+            for (_i865 = 0; _i865 < _size861; ++_i865)
             {
-              xfer += iprot->readString((*(this->success))[_i845]);
+              xfer += iprot->readString((*(this->success))[_i865]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7634,14 +7933,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size846;
-            ::apache::thrift::protocol::TType _etype849;
-            xfer += iprot->readListBegin(_etype849, _size846);
-            this->new_parts.resize(_size846);
-            uint32_t _i850;
-            for (_i850 = 0; _i850 < _size846; ++_i850)
+            uint32_t _size866;
+            ::apache::thrift::protocol::TType _etype869;
+            xfer += iprot->readListBegin(_etype869, _size866);
+            this->new_parts.resize(_size866);
+            uint32_t _i870;
+            for (_i870 = 0; _i870 < _size866; ++_i870)
             {
-              xfer += this->new_parts[_i850].read(iprot);
+              xfer += this->new_parts[_i870].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7670,10 +7969,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter851;
-    for (_iter851 = this->new_parts.begin(); _iter851 != this->new_parts.end(); ++_iter851)
+    std::vector<Partition> ::const_iterator _iter871;
+    for (_iter871 = this->new_parts.begin(); _iter871 != this->new_parts.end(); ++_iter871)
     {
-      xfer += (*_iter851).write(oprot);
+      xfer += (*_iter871).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -7697,10 +7996,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter852;
-    for (_iter852 = (*(this->new_parts)).begin(); _iter852 != (*(this->new_parts)).end(); ++_iter852)
+    std::vector<Partition> ::const_iterator _iter872;
+    for (_iter872 = (*(this->new_parts)).begin(); _iter872 != (*(this->new_parts)).end(); ++_iter872)
     {
-      xfer += (*_iter852).write(oprot);
+      xfer += (*_iter872).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -7909,14 +8208,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size853;
-            ::apache::thrift::protocol::TType _etype856;
-            xfer += iprot->readListBegin(_etype856, _size853);
-            this->new_parts.resize(_size853);
-            uint32_t _i857;
-            for (_i857 = 0; _i857 < _size853; ++_i857)
+            uint32_t _size873;
+            ::apache::thrift::protocol::TType _etype876;
+            xfer += iprot->readListBegin(_etype876, _size873);
+            this->new_parts.resize(_size873);
+            uint32_t _i877;
+            for (_i877 = 0; _i877 < _size873; ++_i877)
             {
-              xfer += this->new_parts[_i857].read(iprot);
+              xfer += this->new_parts[_i877].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7945,10 +8244,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<PartitionSpec> ::const_iterator _iter858;
-    for (_iter858 = this->new_parts.begin(); _iter858 != this->new_parts.end(); ++_iter858)
+    std::vector<PartitionSpec> ::const_iterator _iter878;
+    for (_iter878 = this->new_parts.begin(); _iter878 != this->new_parts.end(); ++_iter878)
     {
-      xfer += (*_iter858).write(oprot);
+      xfer += (*_iter878).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -7972,10 +8271,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<PartitionSpec> ::const_iterator _iter859;
-    for (_iter859 = (*(this->new_parts)).begin(); _iter859 != (*(this->new_parts)).end(); ++_iter859)
+    std::vector<PartitionSpec> ::const_iterator _iter879;
+    for (_iter879 = (*(this->new_parts)).begin(); _iter879 != (*(this->new_parts)).end(); ++_iter879)
     {
-      xfer += (*_iter859).write(oprot);
+      xfer += (*_iter879).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -8200,14 +8499,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size860;
-            ::apache::thrift::protocol::TType _etype863;
-            xfer += iprot->readListBegin(_etype863, _size860);
-            this->part_vals.resize(_size860);
-            uint32_t _i864;
-            for (_i864 = 0; _i864 < _size860; ++_i864)
+            uint32_t _size880;
+            ::apache::thrift::protocol::TType _etype883;
+            xfer += iprot->readListBegin(_etype883, _size880);
+            this->part_vals.resize(_size880);
+            uint32_t _i884;
+            for (_i884 = 0; _i884 < _size880; ++_i884)
             {
-              xfer += iprot->readString(this->part_vals[_i864]);
+              xfer += iprot->readString(this->part_vals[_i884]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8244,10 +8543,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter865;
-    for (_iter865 = this->part_vals.begin(); _iter865 != this->part_vals.end(); ++_iter865)
+    std::vector<std::string> ::const_iterator _iter885;
+    for (_iter885 = this->part_vals.begin(); _iter885 != this->part_vals.end(); ++_iter885)
     {
-      xfer += oprot->writeString((*_iter865));
+      xfer += oprot->writeString((*_iter885));
     }
     xfer += oprot->writeListEnd();
   }
@@ -8279,10 +8578,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter866;
-    for (_iter866 = (*(this->part_vals)).begin(); _iter866 != (*(this->part_vals)).end(); ++_iter866)
+    std::vector<std::string> ::const_iterator _iter886;
+    for (_iter886 = (*(this->part_vals)).begin(); _iter886 != (*(this->part_vals)).end(); ++_iter886)
     {
-      xfer += oprot->writeString((*_iter866));
+      xfer += oprot->writeString((*_iter886));
     }
     xfer += oprot->writeListEnd();
   }
@@ -8754,14 +9053,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size867;
-            ::apache::thrift::protocol::TType _etype870;
-            xfer += iprot->readListBegin(_etype870, _size867);
-            this->part_vals.resize(_size867);
-            uint32_t _i871;
-            for (_i871 = 0; _i871 < _size867; ++_i871)
+            uint32_t _size887;
+            ::apache::thrift::protocol::TType _etype890;
+            xfer += iprot->readListBegin(_etype890, _size887);
+            this->part_vals.resize(_size887);
+            uint32_t _i891;
+            for (_i891 = 0; _i891 < _size887; ++_i891)
             {
-              xfer += iprot->readString(this->part_vals[_i871]);
+              xfer += iprot->readString(this->part_vals[_i891]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8806,10 +9105,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter872;
-    for (_iter872 = this->part_vals.begin(); _iter872 != this->part_vals.end(); ++_iter872)
+    std::vector<std::string> ::const_iterator _iter892;
+    for (_iter892 = this->part_vals.begin(); _iter892 != this->part_vals.end(); ++_iter892)
     {
-      xfer += oprot->writeString((*_iter872));
+      xfer += oprot->writeString((*_iter892));
     }
     xfer += oprot->writeListEnd();
   }
@@ -8845,10 +9144,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter873;
-    for (_iter873 = (*(this->part_vals)).begin(); _iter873 != (*(this->part_vals)).end(); ++_iter873)
+    std::vector<std::string> ::const_iterator _iter893;
+    for (_iter893 = (*(this->part_vals)).begin(); _iter893 != (*(this->part_vals)).end(); ++_iter893)
     {
-      xfer += oprot->writeString((*_iter873));
+      xfer += oprot->writeString((*_iter893));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9651,14 +9950,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size874;
-            ::apache::thrift::protocol::TType _etype877;
-            xfer += iprot->readListBegin(_etype877, _size874);
-            this->part_vals.resize(_size874);
-            uint32_t _i878;
-            for (_i878 = 0; _i878 < _size874; ++_i878)
+            uint32_t _size894;
+            ::apache::thrift::protocol::TType _etype897;
+            xfer += iprot->readListBegin(_etype897, _size894);
+            this->part_vals.resize(_size894);
+            uint32_t _i898;
+            for (_i898 = 0; _i898 < _size894; ++_i898)
             {
-              xfer += iprot->readString(this->part_vals[_i878]);
+              xfer += iprot->readString(this->part_vals[_i898]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9703,10 +10002,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter879;
-    for (_iter879 = this->part_vals.begin(); _iter879 != this->part_vals.end(); ++_iter879)
+    std::vector<std::string> ::const_iterator _iter899;
+    for (_iter899 = this->part_vals.begin(); _iter899 != this->part_vals.end(); ++_iter899)
     {
-      xfer += oprot->writeString((*_iter879));
+      xfer += oprot->writeString((*_iter899));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9742,10 +10041,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter880;
-    for (_iter880 = (*(this->part_vals)).begin(); _iter880 != (*(this->part_vals)).end(); ++_iter880)
+    std::vector<std::string> ::const_iterator _iter900;
+    for (_iter900 = (*(this->part_vals)).begin(); _iter900 != (*(this->part_vals)).end(); ++_iter900)
     {
-      xfer += oprot->writeString((*_iter880));
+      xfer += oprot->writeString((*_iter900));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9954,14 +10253,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size881;
-            ::apache::thrift::protocol::TType _etype884;
-            xfer += iprot->readListBegin(_etype884, _size881);
-            this->part_vals.resize(_size881);
-            uint32_t _i885;
-            for (_i885 = 0; _i885 < _size881; ++_i885)
+            uint32_t _size901;
+            ::apache::thrift::protocol::TType _etype904;
+            xfer += iprot->readListBegin(_etype904, _size901);
+            this->part_vals.resize(_size901);
+            uint32_t _i905;
+            for (_i905 = 0; _i905 < _size901; ++_i905)
             {
-              xfer += iprot->readString(this->part_vals[_i885]);
+              xfer += iprot->readString(this->part_vals[_i905]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10014,10 +10313,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter886;
-    for (_iter886 = this->part_vals.begin(); _iter886 != this->part_vals.end(); ++_iter886)
+    std::vector<std::string> ::const_iterator _iter906;
+    for (_iter906 = this->part_vals.begin(); _iter906 != this->part_vals.end(); ++_iter906)
     {
-      xfer += oprot->writeString((*_iter886));
+      xfer += oprot->writeString((*_iter906));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10057,10 +10356,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter887;
-    for (_iter887 = (*(this->part_vals)).begin(); _iter887 != (*(this->part_vals)).end(); ++_iter887)
+    std::vector<std::string> ::const_iterator _iter907;
+    for (_iter907 = (*(this->part_vals)).begin(); _iter907 != (*(this->part_vals)).end(); ++_iter907)
     {
-      xfer += oprot->writeString((*_iter887));
+      xfer += oprot->writeString((*_iter907));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11066,14 +11365,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size888;
-            ::apache::thrift::protocol::TType _etype891;
-            xfer += iprot->readListBegin(_etype891, _size888);
-            this->part_vals.resize(_size888);
-            uint32_t _i892;
-            for (_i892 = 0; _i892 < _size888; ++_i892)
+            uint32_t _size908;
+            ::apache::thrift::protocol::TType _etype911;
+            xfer += iprot->readListBegin(_etype911, _size908);
+            this->part_vals.resize(_size908);
+            uint32_t _i912;
+            for (_i912 = 0; _i912 < _size908; ++_i912)
             {
-              xfer += iprot->readString(this->part_vals[_i892]);
+              xfer += iprot->readString(this->part_vals[_i912]);
             }
             xfer += iprot->readListEnd();
           }
@@ -11110,10 +11409,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter893;
-    for (_iter893 = this->part_vals.begin(); _iter893 != this->part_vals.end(); ++_iter893)
+    std::vector<std::string> ::const_iterator _iter913;
+    for (_iter913 = this->part_vals.begin(); _iter913 != this->part_vals.end(); ++_iter913)
     {
-      xfer += oprot->writeString((*_iter893));
+      xfer += oprot->writeString((*_iter913));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11145,10 +11444,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter894;
-    for (_iter894 = (*(this->part_vals)).begin(); _iter894 != (*(this->part_vals)).end(); ++_iter894)
+    std::vector<std::string> ::const_iterator _iter914;
+    for (_iter914 = (*(this->part_vals)).begin(); _iter914 != (*(this->part_vals)).end(); ++_iter914)
     {
-      xfer += oprot->writeString((*_iter894));
+      xfer += oprot->writeString((*_iter914));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11337,17 +11636,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size895;
-            ::apache::thrift::protocol::TType _ktype896;
-            ::apache::thrift::protocol::TType _vtype897;
-            xfer += iprot->readMapBegin(_ktype896, _vtype897, _size895);
-            uint32_t _i899;
-            for (_i899 = 0; _i899 < _size895; ++_i899)
+            uint32_t _size915;
+            ::apache::thrift::protocol::TType _ktype916;
+            ::apache::thrift::protocol::TType _vtype917;
+            xfer += iprot->readMapBegin(_ktype916, _vtype917, _size915);
+            uint32_t _i919;
+            for (_i919 = 0; _i919 < _size915; ++_i919)
             {
-              std::string _key900;
-              xfer += iprot->readString(_key900);
-              std::string& _val901 = this->partitionSpecs[_key900];
-              xfer += iprot->readString(_val901);
+              std::string _key920;
+              xfer += iprot->readString(_key920);
+              std::string& _val921 = this->partitionSpecs[_key920];
+              xfer += iprot->readString(_val921);
             }
             xfer += iprot->readMapEnd();
           }
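
Maps follow a slightly different generated pattern than lists:
readMapBegin reports the key type, value type, and pair count, and since
a std::map cannot be pre-sized the reader inserts each key via operator[]
and then fills the mapped value in place. A condensed sketch of the
partitionSpecs loop above, with the numeric suffixes dropped for
readability:

    uint32_t size;
    ::apache::thrift::protocol::TType ktype, vtype;
    xfer += iprot->readMapBegin(ktype, vtype, size);
    for (uint32_t i = 0; i < size; ++i) {
      std::string key;
      xfer += iprot->readString(key);
      std::string& val = this->partitionSpecs[key];  // insert-or-find
      xfer += iprot->readString(val);
    }
    xfer += iprot->readMapEnd();
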
@@ -11408,11 +11707,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter902;
-    for (_iter902 = this->partitionSpecs.begin(); _iter902 != this->partitionSpecs.end(); ++_iter902)
+    std::map<std::string, std::string> ::const_iterator _iter922;
+    for (_iter922 = this->partitionSpecs.begin(); _iter922 != this->partitionSpecs.end(); ++_iter922)
     {
-      xfer += oprot->writeString(_iter902->first);
-      xfer += oprot->writeString(_iter902->second);
+      xfer += oprot->writeString(_iter922->first);
+      xfer += oprot->writeString(_iter922->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -11452,11 +11751,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter903;
-    for (_iter903 = (*(this->partitionSpecs)).begin(); _iter903 != (*(this->partitionSpecs)).end(); ++_iter903)
+    std::map<std::string, std::string> ::const_iterator _iter923;
+    for (_iter923 = (*(this->partitionSpecs)).begin(); _iter923 != (*(this->partitionSpecs)).end(); ++_iter923)
     {
-      xfer += oprot->writeString(_iter903->first);
-      xfer += oprot->writeString(_iter903->second);
+      xfer += oprot->writeString(_iter923->first);
+      xfer += oprot->writeString(_iter923->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -11701,17 +12000,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size904;
-            ::apache::thrift::protocol::TType _ktype905;
-            ::apache::thrift::protocol::TType _vtype906;
-            xfer += iprot->readMapBegin(_ktype905, _vtype906, _size904);
-            uint32_t _i908;
-            for (_i908 = 0; _i908 < _size904; ++_i908)
+            uint32_t _size924;
+            ::apache::thrift::protocol::TType _ktype925;
+            ::apache::thrift::protocol::TType _vtype926;
+            xfer += iprot->readMapBegin(_ktype925, _vtype926, _size924);
+            uint32_t _i928;
+            for (_i928 = 0; _i928 < _size924; ++_i928)
             {
-              std::string _key909;
-              xfer += iprot->readString(_key909);
-              std::string& _val910 = this->partitionSpecs[_key909];
-              xfer += iprot->readString(_val910);
+              std::string _key929;
+              xfer += iprot->readString(_key929);
+              std::string& _val930 = this->partitionSpecs[_key929];
+              xfer += iprot->readString(_val930);
             }
             xfer += iprot->readMapEnd();
           }
@@ -11772,11 +12071,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter911;
-    for (_iter911 = this->partitionSpecs.begin(); _iter911 != this->partitionSpecs.end(); ++_iter911)
+    std::map<std::string, std::string> ::const_iterator _iter931;
+    for (_iter931 = this->partitionSpecs.begin(); _iter931 != this->partitionSpecs.end(); ++_iter931)
     {
-      xfer += oprot->writeString(_iter911->first);
-      xfer += oprot->writeString(_iter911->second);
+      xfer += oprot->writeString(_iter931->first);
+      xfer += oprot->writeString(_iter931->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -11816,11 +12115,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter912;
-    for (_iter912 = (*(this->partitionSpecs)).begin(); _iter912 != (*(this->partitionSpecs)).end(); ++_iter912)
+    std::map<std::string, std::string> ::const_iterator _iter932;
+    for (_iter932 = (*(this->partitionSpecs)).begin(); _iter932 != (*(this->partitionSpecs)).end(); ++_iter932)
     {
-      xfer += oprot->writeString(_iter912->first);
-      xfer += oprot->writeString(_iter912->second);
+      xfer += oprot->writeString(_iter932->first);
+      xfer += oprot->writeString(_iter932->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -11877,14 +12176,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size913;
-            ::apache::thrift::protocol::TType _etype916;
-            xfer += iprot->readListBegin(_etype916, _size913);
-            this->success.resize(_size913);
-            uint32_t _i917;
-            for (_i917 = 0; _i917 < _size913; ++_i917)
+            uint32_t _size933;
+            ::apache::thrift::protocol::TType _etype936;
+            xfer += iprot->readListBegin(_etype936, _size933);
+            this->success.resize(_size933);
+            uint32_t _i937;
+            for (_i937 = 0; _i937 < _size933; ++_i937)
             {
-              xfer += this->success[_i917].read(iprot);
+              xfer += this->success[_i937].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -11947,10 +12246,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter918;
-      for (_iter918 = this->success.begin(); _iter918 != this->success.end(); ++_iter918)
+      std::vector<Partition> ::const_iterator _iter938;
+      for (_iter938 = this->success.begin(); _iter938 != this->success.end(); ++_iter938)
       {
-        xfer += (*_iter918).write(oprot);
+        xfer += (*_iter938).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -12007,14 +12306,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size919;
-            ::apache::thrift::protocol::TType _etype922;
-            xfer += iprot->readListBegin(_etype922, _size919);
-            (*(this->success)).resize(_size919);
-            uint32_t _i923;
-            for (_i923 = 0; _i923 < _size919; ++_i923)
+            uint32_t _size939;
+            ::apache::thrift::protocol::TType _etype942;
+            xfer += iprot->readListBegin(_etype942, _size939);
+            (*(this->success)).resize(_size939);
+            uint32_t _i943;
+            for (_i943 = 0; _i943 < _size939; ++_i943)
             {
-              xfer += (*(this->success))[_i923].read(iprot);
+              xfer += (*(this->success))[_i943].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -12113,14 +12412,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size924;
-            ::apache::thrift::protocol::TType _etype927;
-            xfer += iprot->readListBegin(_etype927, _size924);
-            this->part_vals.resize(_size924);
-            uint32_t _i928;
-            for (_i928 = 0; _i928 < _size924; ++_i928)
+            uint32_t _size944;
+            ::apache::thrift::protocol::TType _etype947;
+            xfer += iprot->readListBegin(_etype947, _size944);
+            this->part_vals.resize(_size944);
+            uint32_t _i948;
+            for (_i948 = 0; _i948 < _size944; ++_i948)
             {
-              xfer += iprot->readString(this->part_vals[_i928]);
+              xfer += iprot->readString(this->part_vals[_i948]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12141,14 +12440,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size929;
-            ::apache::thrift::protocol::TType _etype932;
-            xfer += iprot->readListBegin(_etype932, _size929);
-            this->group_names.resize(_size929);
-            uint32_t _i933;
-            for (_i933 = 0; _i933 < _size929; ++_i933)
+            uint32_t _size949;
+            ::apache::thrift::protocol::TType _etype952;
+            xfer += iprot->readListBegin(_etype952, _size949);
+            this->group_names.resize(_size949);
+            uint32_t _i953;
+            for (_i953 = 0; _i953 < _size949; ++_i953)
             {
-              xfer += iprot->readString(this->group_names[_i933]);
+              xfer += iprot->readString(this->group_names[_i953]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12185,10 +12484,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter934;
-    for (_iter934 = this->part_vals.begin(); _iter934 != this->part_vals.end(); ++_iter934)
+    std::vector<std::string> ::const_iterator _iter954;
+    for (_iter954 = this->part_vals.begin(); _iter954 != this->part_vals.end(); ++_iter954)
     {
-      xfer += oprot->writeString((*_iter934));
+      xfer += oprot->writeString((*_iter954));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12201,10 +12500,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter935;
-    for (_iter935 = this->group_names.begin(); _iter935 != this->group_names.end(); ++_iter935)
+    std::vector<std::string> ::const_iterator _iter955;
+    for (_iter955 = this->group_names.begin(); _iter955 != this->group_names.end(); ++_iter955)
     {
-      xfer += oprot->writeString((*_iter935));
+      xfer += oprot->writeString((*_iter955));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12236,10 +12535,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter936;
-    for (_iter936 = (*(this->part_vals)).begin(); _iter936 != (*(this->part_vals)).end(); ++_iter936)
+    std::vector<std::string> ::const_iterator _iter956;
+    for (_iter956 = (*(this->part_vals)).begin(); _iter956 != (*(this->part_vals)).end(); ++_iter956)
     {
-      xfer += oprot->writeString((*_iter936));
+      xfer += oprot->writeString((*_iter956));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12252,10 +12551,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter937;
-    for (_iter937 = (*(this->group_names)).begin(); _iter937 != (*(this->group_names)).end(); ++_iter937)
+    std::vector<std::string> ::const_iterator _iter957;
+    for (_iter957 = (*(this->group_names)).begin(); _iter957 != (*(this->group_names)).end(); ++_iter957)
     {
-      xfer += oprot->writeString((*_iter937));
+      xfer += oprot->writeString((*_iter957));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12814,14 +13113,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size938;
-            ::apache::thrift::protocol::TType _etype941;
-            xfer += iprot->readListBegin(_etype941, _size938);
-            this->success.resize(_size938);
-            uint32_t _i942;
-            for (_i942 = 0; _i942 < _size938; ++_i942)
+            uint32_t _size958;
+            ::apache::thrift::protocol::TType _etype961;
+            xfer += iprot->readListBegin(_etype961, _size958);
+            this->success.resize(_size958);
+            uint32_t _i962;
+            for (_i962 = 0; _i962 < _size958; ++_i962)
             {
-              xfer += this->success[_i942].read(iprot);
+              xfer += this->success[_i962].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -12868,10 +13167,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter943;
-      for (_iter943 = this->success.begin(); _iter943 != this->success.end(); ++_iter943)
+      std::vector<Partition> ::const_iterator _iter963;
+      for (_iter963 = this->success.begin(); _iter963 != this->success.end(); ++_iter963)
       {
-        xfer += (*_iter943).write(oprot);
+        xfer += (*_iter963).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -12920,14 +13219,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size944;
-            ::apache::thrift::protocol::TType _etype947;
-            xfer += iprot->readListBegin(_etype947, _size944);
-            (*(this->success)).resize(_size944);
-            uint32_t _i948;
-            for (_i948 = 0; _i948 < _size944; ++_i948)
+            uint32_t _size964;
+            ::apache::thrift::protocol::TType _etype967;
+            xfer += iprot->readListBegin(_etype967, _size964);
+            (*(this->success)).resize(_size964);
+            uint32_t _i968;
+            for (_i968 = 0; _i968 < _size964; ++_i968)
             {
-              xfer += (*(this->success))[_i948].read(iprot);
+              xfer += (*(this->success))[_i968].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13026,14 +13325,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size949;
-            ::apache::thrift::protocol::TType _etype952;
-            xfer += iprot->readListBegin(_etype952, _size949);
-            this->group_names.resize(_size949);
-            uint32_t _i953;
-            for (_i953 = 0; _i953 < _size949; ++_i953)
+            uint32_t _size969;
+            ::apache::thrift::protocol::TType _etype972;
+            xfer += iprot->readListBegin(_etype972, _size969);
+            this->group_names.resize(_size969);
+            uint32_t _i973;
+            for (_i973 = 0; _i973 < _size969; ++_i973)
             {
-              xfer += iprot->readString(this->group_names[_i953]);
+              xfer += iprot->readString(this->group_names[_i973]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13078,10 +13377,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter954;
-    for (_iter954 = this->group_names.begin(); _iter954 != this->group_names.end(); ++_iter954)
+    std::vector<std::string> ::const_iterator _iter974;
+    for (_iter974 = this->group_names.begin(); _iter974 != this->group_names.end(); ++_iter974)
     {
-      xfer += oprot->writeString((*_iter954));
+      xfer += oprot->writeString((*_iter974));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13121,10 +13420,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter955;
-    for (_iter955 = (*(this->group_names)).begin(); _iter955 != (*(this->group_names)).end(); ++_iter955)
+    std::vector<std::string> ::const_iterator _iter975;
+    for (_iter975 = (*(this->group_names)).begin(); _iter975 != (*(this->group_names)).end(); ++_iter975)
     {
-      xfer += oprot->writeString((*_iter955));
+      xfer += oprot->writeString((*_iter975));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13165,14 +13464,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size956;
-            ::apache::thrift::protocol::TType _etype959;
-            xfer += iprot->readListBegin(_etype959, _size956);
-            this->success.resize(_size956);
-            uint32_t _i960;
-            for (_i960 = 0; _i960 < _size956; ++_i960)
+            uint32_t _size976;
+            ::apache::thrift::protocol::TType _etype979;
+            xfer += iprot->readListBegin(_etype979, _size976);
+            this->success.resize(_size976);
+            uint32_t _i980;
+            for (_i980 = 0; _i980 < _size976; ++_i980)
             {
-              xfer += this->success[_i960].read(iprot);
+              xfer += this->success[_i980].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13219,10 +13518,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter961;
-      for (_iter961 = this->success.begin(); _iter961 != this->success.end(); ++_iter961)
+      std::vector<Partition> ::const_iterator _iter981;
+      for (_iter981 = this->success.begin(); _iter981 != this->success.end(); ++_iter981)
       {
-        xfer += (*_iter961).write(oprot);
+        xfer += (*_iter981).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -13271,14 +13570,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size962;
-            ::apache::thrift::protocol::TType _etype965;
-            xfer += iprot->readListBegin(_etype965, _size962);
-            (*(this->success)).resize(_size962);
-            uint32_t _i966;
-            for (_i966 = 0; _i966 < _size962; ++_i966)
+            uint32_t _size982;
+            ::apache::thrift::protocol::TType _etype985;
+            xfer += iprot->readListBegin(_etype985, _size982);
+            (*(this->success)).resize(_size982);
+            uint32_t _i986;
+            for (_i986 = 0; _i986 < _size982; ++_i986)
             {
-              xfer += (*(this->success))[_i966].read(iprot);
+              xfer += (*(this->success))[_i986].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13456,14 +13755,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size967;
-            ::apache::thrift::protocol::TType _etype970;
-            xfer += iprot->readListBegin(_etype970, _size967);
-            this->success.resize(_size967);
-            uint32_t _i971;
-            for (_i971 = 0; _i971 < _size967; ++_i971)
+            uint32_t _size987;
+            ::apache::thrift::protocol::TType _etype990;
+            xfer += iprot->readListBegin(_etype990, _size987);
+            this->success.resize(_size987);
+            uint32_t _i991;
+            for (_i991 = 0; _i991 < _size987; ++_i991)
             {
-              xfer += this->success[_i971].read(iprot);
+              xfer += this->success[_i991].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13510,10 +13809,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<PartitionSpec> ::const_iterator _iter972;
-      for (_iter972 = this->success.begin(); _iter972 != this->success.end(); ++_iter972)
+      std::vector<PartitionSpec> ::const_iterator _iter992;
+      for (_iter992 = this->success.begin(); _iter992 != this->success.end(); ++_iter992)
       {
-        xfer += (*_iter972).write(oprot);
+        xfer += (*_iter992).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -13562,14 +13861,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size973;
-            ::apache::thrift::protocol::TType _etype976;
-            xfer += iprot->readListBegin(_etype976, _size973);
-            (*(this->success)).resize(_size973);
-            uint32_t _i977;
-            for (_i977 = 0; _i977 < _size973; ++_i977)
+            uint32_t _size993;
+            ::apache::thrift::protocol::TType _etype996;
+            xfer += iprot->readListBegin(_etype996, _size993);
+            (*(this->success)).resize(_size993);
+            uint32_t _i997;
+            for (_i997 = 0; _i997 < _size993; ++_i997)
             {
-              xfer += (*(this->success))[_i977].read(iprot);
+              xfer += (*(this->success))[_i997].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13747,14 +14046,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size978;
-            ::apache::thrift::protocol::TType _etype981;
-            xfer += iprot->readListBegin(_etype981, _size978);
-            this->success.resize(_size978);
-            uint32_t _i982;
-            for (_i982 = 0; _i982 < _size978; ++_i982)
+            uint32_t _size998;
+            ::apache::thrift::protocol::TType _etype1001;
+            xfer += iprot->readListBegin(_etype1001, _size998);
+            this->success.resize(_size998);
+            uint32_t _i1002;
+            for (_i1002 = 0; _i1002 < _size998; ++_i1002)
             {
-              xfer += iprot->readString(this->success[_i982]);
+              xfer += iprot->readString(this->success[_i1002]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13793,10 +14092,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter983;
-      for (_iter983 = this->success.begin(); _iter983 != this->success.end(); ++_iter983)
+      std::vector<std::string> ::const_iterator _iter1003;
+      for (_iter1003 = this->success.begin(); _iter1003 != this->success.end(); ++_iter1003)
       {
-        xfer += oprot->writeString((*_iter983));
+        xfer += oprot->writeString((*_iter1003));
       }
       xfer += oprot->writeListEnd();
     }
@@ -13841,14 +14140,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size984;
-            ::apache::thrift::protocol::TType _etype987;
-            xfer += iprot->readListBegin(_etype987, _size984);
-            (*(this->success)).resize(_size984);
-            uint32_t _i988;
-            for (_i988 = 0; _i988 < _size984; ++_i988)
+            uint32_t _size1004;
+            ::apache::thrift::protocol::TType _etype1007;
+            xfer += iprot->readListBegin(_etype1007, _size1004);
+            (*(this->success)).resize(_size1004);
+            uint32_t

<TRUNCATED>
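The hunks above come from the Thrift-regenerated C++ client; the only change is that the code generator renumbered its temporaries (_size919 becomes _size939, _iter934 becomes _iter954, and so on), so the serialization logic itself is untouched. For readers who do not want to squint at generated C++, here is a minimal sketch of the same list-read pattern against Thrift's Java bindings; the method name and the list<string> payload are illustrative choices, not Hive code:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;

    public class ListReadSketch {
      // Mirrors the generated readListBegin / resize / loop / readListEnd shape:
      // _size939 is listMeta.size here, and _i943 is the loop index i.
      public static List<String> readStringList(TProtocol iprot) throws TException {
        TList listMeta = iprot.readListBegin();   // element type + declared size
        List<String> out = new ArrayList<>(Math.max(0, listMeta.size));
        for (int i = 0; i < listMeta.size; ++i) {
          out.add(iprot.readString());
        }
        iprot.readListEnd();
        return out;
      }
    }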

[14/55] [abbrv] hive git commit: HIVE-12223: Filter on Grouping__ID does not work properly (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-12223: Filter on Grouping__ID does not work properly (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/175087ba
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/175087ba
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/175087ba

Branch: refs/heads/spark
Commit: 175087bafed97c07fffacdfae651faef327013ae
Parents: 0e94a1d
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Thu Oct 22 09:06:30 2015 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu Nov 5 09:35:00 2015 +0100

----------------------------------------------------------------------
 .../hive/ql/ppd/ExprWalkerProcFactory.java      | 12 ++++
 .../clientpositive/groupby_grouping_id3.q       | 22 +++++++
 .../clientpositive/groupby_grouping_id3.q.out   | 60 ++++++++++++++++++++
 3 files changed, 94 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/175087ba/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
index 9bd1847..a09dcef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
@@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
@@ -84,6 +85,17 @@ public final class ExprWalkerProcFactory {
         // replace the output expression with the input expression so that
         // parent op can understand this expression
         ExprNodeDesc exp = op.getColumnExprMap().get(colref.getColumn());
+        // if the operator is a groupby and we are referencing the grouping
+        // id column, we cannot push the predicate
+        if (op instanceof GroupByOperator) {
+          GroupByOperator groupBy = (GroupByOperator) op;
+          if (groupBy.getConf().isGroupingSetsPresent()) {
+            int groupingSetPlaceholderPos = groupBy.getConf().getKeys().size() - 1;
+            if (colref.getColumn().equals(groupBy.getSchema().getColumnNames().get(groupingSetPlaceholderPos))) {
+              exp = null;
+            }
+          }
+        }
         if (exp == null) {
           // means that expression can't be pushed either because it is value in
           // group by
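The guard added above keys off the position of the synthetic grouping-set id: when grouping sets are present, the grouping id is materialized as the last group-by key, and a predicate referencing that output column must not be pushed below the GroupByOperator (nulling exp drops it into the existing cannot-push branch). A self-contained paraphrase, with plain values standing in for what GroupByOperator's conf and schema expose; the _colN names are hypothetical:

    import java.util.List;

    public class GroupingSetPushdownGuard {
      // Returns true when a column reference resolves to the grouping-set
      // placeholder, i.e. the output column of the last group-by key.
      static boolean refersToGroupingSetPlaceholder(boolean groupingSetsPresent,
          int numGroupByKeys, List<String> outputColumnNames, String column) {
        if (!groupingSetsPresent) {
          return false;                            // plain GROUP BY: pushdown stays legal
        }
        int placeholderPos = numGroupByKeys - 1;   // grouping id is the last key
        return column.equals(outputColumnNames.get(placeholderPos));
      }

      public static void main(String[] args) {
        // Hypothetical output schema for GROUP BY key, value GROUPING SETS ((), (key)):
        // two grouping keys plus the synthetic grouping-id key.
        List<String> cols = List.of("_col0", "_col1", "_col2");
        System.out.println(refersToGroupingSetPlaceholder(true, 3, cols, "_col2")); // true
        System.out.println(refersToGroupingSetPlaceholder(true, 3, cols, "_col0")); // false
      }
    }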

http://git-wip-us.apache.org/repos/asf/hive/blob/175087ba/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_id3.q b/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
new file mode 100644
index 0000000..c6746a8
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
@@ -0,0 +1,22 @@
+CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1;
+
+set hive.cbo.enable = false;
+
+-- SORT_QUERY_RESULTS
+
+SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1;
+
+set hive.cbo.enable = true;
+
+SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/175087ba/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
new file mode 100644
index 0000000..c305bfd
--- /dev/null
+++ b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
@@ -0,0 +1,60 @@
+PREHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+POSTHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	NULL	1	2
+2	NULL	1	1
+3	NULL	1	2
+4	NULL	1	1
+PREHOOK: query: SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	NULL	1	2
+2	NULL	1	1
+3	NULL	1	2
+4	NULL	1	1


[12/55] [abbrv] hive git commit: Revert inadvertent addition of HiveConf.java.orig file

Posted by xu...@apache.org.
Revert inadvertent addition of HiveConf.java.orig file


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9ba2cdfd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9ba2cdfd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9ba2cdfd

Branch: refs/heads/spark
Commit: 9ba2cdfd9a8eae371048de3fa92131517d8f8d6d
Parents: 92620d8
Author: Matt McCline <mm...@hortonworks.com>
Authored: Wed Nov 4 14:18:03 2015 -0800
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Wed Nov 4 14:18:03 2015 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hive/conf/HiveConf.java.orig  | 3372 ------------------
 1 file changed, 3372 deletions(-)
----------------------------------------------------------------------



[07/55] [abbrv] hive git commit: HIVE-12327: WebHCat e2e tests TestJob_1 and TestJob_2 fail (Daniel Dai, reviewed by Thejas Nair)

Posted by xu...@apache.org.
HIVE-12327: WebHCat e2e tests TestJob_1 and TestJob_2 fail (Daniel Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/16a86b26
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/16a86b26
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/16a86b26

Branch: refs/heads/spark
Commit: 16a86b2612439d6ba1f920deaea822ae4da3dedc
Parents: 13f8cfe
Author: Daniel Dai <da...@hortonworks.com>
Authored: Tue Nov 3 21:29:46 2015 -0800
Committer: Daniel Dai <da...@hortonworks.com>
Committed: Tue Nov 3 21:29:46 2015 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/16a86b26/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java b/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java
index 288043f..b020ffe 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java
@@ -41,6 +41,7 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.regex.Pattern;
 
 public class WebHCatJTShim23 implements WebHCatJTShim {
   private static final Logger LOG = LoggerFactory.getLogger(WebHCatJTShim23.class);
@@ -139,7 +140,8 @@ public class WebHCatJTShim23 implements WebHCatJTShim {
     }
     catch(IOException ex) {
       String msg = ex.getMessage();
-      if(msg != null && msg.contains("ApplicationNotFoundException")) {
+      if(msg != null && (msg.contains("ApplicationNotFoundException") ||
+          Pattern.compile("History file.*not found").matcher(msg).find())) {
         LOG.info("Job(" + jobid + ") not found: " + msg);
         return null;
       }
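The fix widens the "job is gone" detection: besides ApplicationNotFoundException, an IOException whose message says the job history file is missing now also maps to a null (not-found) result instead of an error. A runnable sketch of just that predicate; the sample messages are made up for illustration:

    import java.util.regex.Pattern;

    public class JobGoneCheck {
      private static final Pattern HISTORY_GONE =
          Pattern.compile("History file.*not found");

      // Same two-way test the patch installs in WebHCatJTShim23.
      static boolean jobIsGone(String msg) {
        return msg != null && (msg.contains("ApplicationNotFoundException")
            || HISTORY_GONE.matcher(msg).find());
      }

      public static void main(String[] args) {
        System.out.println(jobIsGone("History file hdfs://nn/job_1.jhist not found")); // true
        System.out.println(jobIsGone("ApplicationNotFoundException: app gone"));       // true
        System.out.println(jobIsGone("disk quota exceeded"));                          // false
      }
    }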


[18/55] [abbrv] hive git commit: HIVE-12340 : ExecDriver.execute() unnecessarily sets METASTOREPWD to HIVE (Hari Subramaniyan, reviewed by Sushanth Sowmyan)

Posted by xu...@apache.org.
HIVE-12340 : ExecDriver.execute() unnecessarily sets METASTOREPWD to HIVE (Hari Subramaniyan, reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d33ddefc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d33ddefc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d33ddefc

Branch: refs/heads/spark
Commit: d33ddefcc5374f8bab1dadc4b083d0097edde623
Parents: a46729b
Author: Hari Subramaniyan <ha...@apache.org>
Authored: Thu Nov 5 11:47:59 2015 -0800
Committer: Hari Subramaniyan <ha...@apache.org>
Committed: Thu Nov 5 11:47:59 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java     | 10 ----------
 1 file changed, 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d33ddefc/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index 5cbf764..380cf08 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -390,12 +390,6 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
         }
       }
 
-      // remove the pwd from conf file so that job tracker doesn't show this
-      // logs
-      String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
-      if (pwd != null) {
-        HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
-      }
       JobClient jc = new JobClient(job);
       // make this client wait if job tracker is not behaving well.
       Throttle.checkJobTracker(job, LOG);
@@ -433,10 +427,6 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
 
       // Finally SUBMIT the JOB!
       rj = jc.submitJob(job);
-      // replace it back
-      if (pwd != null) {
-        HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd);
-      }
 
       returnVal = jobExecHelper.progress(rj, jc, ctx.getHiveTxnManager());
       success = (returnVal == 0);
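The deleted lines masked the metastore password in the job conf (overwriting it with the literal "HIVE") before submission and wrote the real value back afterwards; per the JIRA title the masking is unnecessary, so it is dropped wholesale. Worth noting from the diff: the removed code restored the password only on the success path, so an exception from submitJob would have left the masked value in place. If one ever did need this idiom, a try/finally keeps the restore unconditional. A generic sketch, not Hive code, with a hypothetical key and value:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    public class MaskAroundCall {
      // Temporarily replaces conf[key] with mask while body runs, then restores it.
      static <T> T withMaskedValue(Map<String, String> conf, String key,
          String mask, Supplier<T> body) {
        String original = conf.get(key);
        if (original == null) {
          return body.get();             // nothing to hide
        }
        conf.put(key, mask);
        try {
          return body.get();
        } finally {
          conf.put(key, original);       // restored even if body throws
        }
      }

      public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        conf.put("metastore.pwd", "s3cret");
        String seen = withMaskedValue(conf, "metastore.pwd", "HIVE",
            () -> conf.get("metastore.pwd"));
        System.out.println(seen);                      // HIVE
        System.out.println(conf.get("metastore.pwd")); // s3cret
      }
    }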


[02/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
index 8b6614e..0b14304 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
@@ -99,13 +99,13 @@ NULL
 NULL
 NULL
 NULL
-0
-0
-0
-0
-0
-0.123456789
-0.123456789
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1234567890
+0.1234567890
 1.2345678901
 1.2345678901
 1.2345678901
@@ -122,14 +122,14 @@ NULL
 12345.6789012346
 123456.7890123456
 123456.7890123457
-1234567.890123456
+1234567.8901234560
 1234567.8901234568
-12345678.90123456
+12345678.9012345600
 12345678.9012345679
-123456789.0123456
+123456789.0123456000
 123456789.0123456789
-1234567890.123456
-1234567890.123456789
+1234567890.1234560000
+1234567890.1234567890
 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -182,13 +182,13 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0.123456789	1.123456789	-0.876543211
-0.123456789	1.123456789	-0.876543211
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.1234567890	1.1234567890	-0.8765432110
+0.1234567890	1.1234567890	-0.8765432110
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
@@ -205,14 +205,14 @@ NULL	NULL	NULL
 12345.6789012346	12346.6789012346	12344.6789012346
 123456.7890123456	123457.7890123456	123455.7890123456
 123456.7890123457	123457.7890123457	123455.7890123457
-1234567.890123456	1234568.890123456	1234566.890123456
+1234567.8901234560	1234568.8901234560	1234566.8901234560
 1234567.8901234568	1234568.8901234568	1234566.8901234568
-12345678.90123456	12345679.90123456	12345677.90123456
+12345678.9012345600	12345679.9012345600	12345677.9012345600
 12345678.9012345679	12345679.9012345679	12345677.9012345679
-123456789.0123456	123456790.0123456	123456788.0123456
+123456789.0123456000	123456790.0123456000	123456788.0123456000
 123456789.0123456789	123456790.0123456789	123456788.0123456789
-1234567890.123456	1234567891.123456	1234567889.123456
-1234567890.123456789	1234567891.123456789	1234567889.123456789
+1234567890.1234560000	1234567891.1234560000	1234567889.1234560000
+1234567890.1234567890	1234567891.1234567890	1234567889.1234567890
 PREHOOK: query: SELECT dec, dec * 2, dec / 3  FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -265,37 +265,37 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0.123456789	0.246913578	0.041152263
-0.123456789	0.246913578	0.041152263
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.1234567890	0.2469135780	0.041152263000
+0.1234567890	0.2469135780	0.041152263000
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
 12345.6789012346	24691.3578024692	4115.226300411533
 12345.6789012346	24691.3578024692	4115.226300411533
-123456.7890123456	246913.5780246912	41152.2630041152
+123456.7890123456	246913.5780246912	41152.263004115200
 123456.7890123457	246913.5780246914	41152.263004115233
-1234567.890123456	2469135.780246912	411522.630041152
+1234567.8901234560	2469135.7802469120	411522.630041152000
 1234567.8901234568	2469135.7802469136	411522.630041152267
-12345678.90123456	24691357.80246912	4115226.30041152
+12345678.9012345600	24691357.8024691200	4115226.300411520000
 12345678.9012345679	24691357.8024691358	4115226.300411522633
-123456789.0123456	246913578.0246912	41152263.0041152
-123456789.0123456789	246913578.0246913578	41152263.0041152263
-1234567890.123456	2469135780.246912	411522630.041152
-1234567890.123456789	2469135780.246913578	411522630.041152263
+123456789.0123456000	246913578.0246912000	41152263.004115200000
+123456789.0123456789	246913578.0246913578	41152263.004115226300
+1234567890.1234560000	2469135780.2469120000	411522630.041152000000
+1234567890.1234567890	2469135780.2469135780	411522630.041152263000
 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -348,13 +348,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.013717421
-0.123456789	0.013717421
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.1234567890	0.013717421000
+0.1234567890	0.013717421000
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
@@ -371,14 +371,14 @@ NULL	NULL
 12345.6789012346	1371.742100137178
 123456.7890123456	13717.421001371733
 123456.7890123457	13717.421001371744
-1234567.890123456	137174.210013717333
+1234567.8901234560	137174.210013717333
 1234567.8901234568	137174.210013717422
-12345678.90123456	1371742.100137173333
+12345678.9012345600	1371742.100137173333
 12345678.9012345679	1371742.100137174211
-123456789.0123456	13717421.001371733333
-123456789.0123456789	13717421.0013717421
-1234567890.123456	137174210.013717333333
-1234567890.123456789	137174210.013717421
+123456789.0123456000	13717421.001371733333
+123456789.0123456789	13717421.001371742100
+1234567890.1234560000	137174210.013717333333
+1234567890.1234567890	137174210.013717421000
 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -431,13 +431,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.0045724736667
-0.123456789	0.0045724736667
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.1234567890	0.0045724736667
+0.1234567890	0.0045724736667
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
@@ -454,14 +454,14 @@ NULL	NULL
 12345.6789012346	457.2473667123926
 123456.7890123456	4572.4736671239111
 123456.7890123457	4572.4736671239148
-1234567.890123456	45724.7366712391111
+1234567.8901234560	45724.7366712391111
 1234567.8901234568	45724.7366712391407
-12345678.90123456	457247.3667123911111
+12345678.9012345600	457247.3667123911111
 12345678.9012345679	457247.3667123914037
-123456789.0123456	4572473.6671239111111
+123456789.0123456000	4572473.6671239111111
 123456789.0123456789	4572473.6671239140333
-1234567890.123456	45724736.6712391111111
-1234567890.123456789	45724736.6712391403333
+1234567890.1234560000	45724736.6712391111111
+1234567890.1234567890	45724736.6712391403333
 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -514,13 +514,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.015241578750190521
-0.123456789	0.015241578750190521
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.1234567890	0.01524157875019052100
+0.1234567890	0.01524157875019052100
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
@@ -537,14 +537,14 @@ NULL	NULL
 12345.6789012346	152415787.53238916034140423716
 123456.7890123456	15241578753.23881726870921383936
 123456.7890123457	15241578753.23884196006701630849
-1234567.890123456	1524157875323.881726870921383936
+1234567.8901234560	1524157875323.88172687092138393600
 1234567.8901234568	1524157875323.88370217954558146624
-12345678.90123456	152415787532388.1726870921383936
+12345678.9012345600	152415787532388.17268709213839360000
 12345678.9012345679	152415787532388.36774881877789971041
-123456789.0123456	15241578753238817.26870921383936
+123456789.0123456000	15241578753238817.26870921383936000000
 123456789.0123456789	15241578753238836.75019051998750190521
-1234567890.123456	NULL
-1234567890.123456789	NULL
+1234567890.1234560000	NULL
+1234567890.1234567890	NULL
 PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
@@ -643,7 +643,7 @@ POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_precision
 #### A masked pattern was here ####
-12345678901234567890.12345678
+12345678901234567890.123456780000000000
 PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
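All of this .q.out churn encodes the same display rule from HIVE-12063: a decimal value is now printed padded with trailing zeros out to its column's declared scale, so 0 in a scale-10 column renders as 0.0000000000 rather than 0. A minimal sketch of the rule, with java.math.BigDecimal standing in for HiveDecimal; the scale of 10 matches the padded values shown above:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalPadding {
      // Pads (never rounds) a decimal literal to the column's declared scale;
      // UNNECESSARY is safe because increasing the scale loses no digits.
      static String padToScale(String value, int columnScale) {
        return new BigDecimal(value)
            .setScale(columnScale, RoundingMode.UNNECESSARY)
            .toPlainString();
      }

      public static void main(String[] args) {
        System.out.println(padToScale("0", 10));                 // 0.0000000000
        System.out.println(padToScale("0.123456789", 10));       // 0.1234567890
        System.out.println(padToScale("1234567.890123456", 10)); // 1234567.8901234560
      }
    }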

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
index edde023..8336999 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
@@ -25,7 +25,7 @@ POSTHOOK: query: select * from decimal_tbl_1_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_1_orc
 #### A masked pattern was here ####
-55555
+55555.000000000000000000
 PREHOOK: query: -- EXPLAIN
 -- SELECT dec, round(null), round(null, 0), round(125, null), 
 -- round(1.0/0.0, 0), round(power(-1.0,0.5), 0)
@@ -121,7 +121,7 @@ FROM decimal_tbl_1_orc ORDER BY d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_1_orc
 #### A masked pattern was here ####
-55555	55555	55555	55555	55555	55560	55600	56000	60000	100000	0	0	0
+55555	55555	55555.0	55555.00	55555.000	55560	55600	56000	60000	100000	0	0	0
 PREHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -150,7 +150,7 @@ POSTHOOK: query: select * from decimal_tbl_2_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
-125.315	-125.315
+125.315000000000000000	-125.315000000000000000
 PREHOOK: query: EXPLAIN
 SELECT
   round(pos) as p, round(pos, 0),
@@ -240,7 +240,7 @@ FROM decimal_tbl_2_orc ORDER BY p
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
-125	125	125.3	125.32	125.315	125.315	130	100	0	0	-125	-125	-125.3	-125.32	-125.315	-125.315	-130	-100	0	0
+125	125	125.3	125.32	125.315	125.3150	130	100	0	0	-125	-125	-125.3	-125.32	-125.315	-125.3150	-130	-100	0	0
 PREHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -268,7 +268,7 @@ POSTHOOK: query: select * from decimal_tbl_3_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
-3.141592653589793
+3.141592653589793000
 PREHOOK: query: EXPLAIN
 SELECT
   round(dec, -15) as d, round(dec, -16),
@@ -402,7 +402,7 @@ FROM decimal_tbl_3_orc ORDER BY d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
-0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	3	3.1	3.14	3.142	3.1416	3.14159	3.141593	3.1415927	3.14159265	3.141592654	3.1415926536	3.14159265359	3.14159265359	3.1415926535898	3.1415926535898	3.14159265358979	3.141592653589793	3.141592653589793
+0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	3	3.1	3.14	3.142	3.1416	3.14159	3.141593	3.1415927	3.14159265	3.141592654	3.1415926536	3.14159265359	3.141592653590	3.1415926535898	3.1415926535898	3.14159265358979	3.141592653589793	3.1415926535897930
 PREHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -431,7 +431,7 @@ POSTHOOK: query: select * from decimal_tbl_4_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_4_orc
 #### A masked pattern was here ####
-1809242.3151111344	-1809242.3151111344
+1809242.315111134400000000	-1809242.315111134400000000
 PREHOOK: query: EXPLAIN
 SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9)
 FROM decimal_tbl_4_orc ORDER BY p

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out
index ffdb1c9..7dea1a2 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out
@@ -73,16 +73,16 @@ POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_trailing
 #### A masked pattern was here ####
-0	0	0
-1	0	0
+0	0.0000	0.00000000
+1	0.0000	0.00000000
 2	NULL	NULL
-3	1	1
-4	10	10
-5	100	100
-6	1000	1000
-7	10000	10000
-8	100000	100000
-9	NULL	1000000
+3	1.0000	1.00000000
+4	10.0000	10.00000000
+5	100.0000	100.00000000
+6	1000.0000	1000.00000000
+7	10000.0000	10000.00000000
+8	100000.0000	100000.00000000
+9	NULL	1000000.00000000
 10	NULL	NULL
 11	NULL	NULL
 12	NULL	NULL
@@ -91,18 +91,18 @@ POSTHOOK: Input: default@decimal_trailing
 15	NULL	NULL
 16	NULL	NULL
 17	NULL	NULL
-18	1	1
-19	10	10
-20	100	100
-21	1000	1000
-22	100000	10000
-23	0	0
-24	0	0
-25	0	0
-26	0	0
-27	0	0
-28	12313.2	134134.312525
-29	99999.999	134134.31242553
+18	1.0000	1.00000000
+19	10.0000	10.00000000
+20	100.0000	100.00000000
+21	1000.0000	1000.00000000
+22	100000.0000	10000.00000000
+23	0.0000	0.00000000
+24	0.0000	0.00000000
+25	0.0000	0.00000000
+26	0.0000	0.00000000
+27	0.0000	0.00000000
+28	12313.2000	134134.31252500
+29	99999.9990	134134.31242553
 PREHOOK: query: DROP TABLE DECIMAL_TRAILING_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_trailing_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
index cc22a56..6df956d 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
@@ -95,44 +95,44 @@ POSTHOOK: query: SELECT key + key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-200
-20
-2
-0.2
-0.02
-400
-40
-4
-0
-0.4
-0.04
-0.6
-0.66
-0.666
--0.6
--0.66
--0.666
-2
-4
-6.28
--2.24
--2.24
--2.244
-2.24
-2.244
-248
-250.4
--2510.98
-6.28
-6.28
-6.28
-2
--2469135780.246913578
-2469135780.24691356
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.2000000000
+0.0200000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.4000000000
+0.0400000000
+0.6000000000
+0.6600000000
+0.6660000000
+-0.6000000000
+-0.6600000000
+-0.6660000000
+2.0000000000
+4.0000000000
+6.2800000000
+-2.2400000000
+-2.2400000000
+-2.2440000000
+2.2400000000
+2.2440000000
+248.0000000000
+250.4000000000
+-2510.9800000000
+6.2800000000
+6.2800000000
+6.2800000000
+2.0000000000
+-2469135780.2469135780
+2469135780.2469135600
 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
@@ -178,44 +178,44 @@ POSTHOOK: query: SELECT key + value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-200
-20
-2
-0.1
-0.01
-400
-40
-4
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-2
-4
-6.14
--2.12
--2.12
--12.122
-2.12
-2.122
-248
-250.2
--2510.49
-6.14
-6.14
-7.14
-2
--2469135780.123456789
-2469135780.12345678
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.1000000000
+0.0100000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+2.0000000000
+4.0000000000
+6.1400000000
+-2.1200000000
+-2.1200000000
+-12.1220000000
+2.1200000000
+2.1220000000
+248.0000000000
+250.2000000000
+-2510.4900000000
+6.1400000000
+6.1400000000
+7.1400000000
+2.0000000000
+-2469135780.1234567890
+2469135780.1234567800
 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
@@ -429,44 +429,44 @@ POSTHOOK: query: SELECT key - key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
@@ -512,44 +512,44 @@ POSTHOOK: query: SELECT key - value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-0
-0
-0
-0.1
-0.01
-0
-0
-0
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-0
-0
-0.14
--0.12
--0.12
-9.878
-0.12
-0.122
-0
-0.2
--0.49
-0.14
-0.14
--0.86
-0
--0.123456789
-0.12345678
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1000000000
+0.0100000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.0000000000
+0.0000000000
+0.1400000000
+-0.1200000000
+-0.1200000000
+9.8780000000
+0.1200000000
+0.1220000000
+0.0000000000
+0.2000000000
+-0.4900000000
+0.1400000000
+0.1400000000
+-0.8600000000
+0.0000000000
+-0.1234567890
+0.1234567800
 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
@@ -763,42 +763,42 @@ POSTHOOK: query: SELECT key * key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-19360000
+19360000.00000000000000000000
 NULL
-0
-0
-10000
-100
-1
-0.01
-0.0001
-40000
-400
-4
-0
-0.04
-0.0004
-0.09
-0.1089
-0.110889
-0.09
-0.1089
-0.110889
-1
-4
-9.8596
-1.2544
-1.2544
-1.258884
-1.2544
-1.258884
-15376
-15675.04
-1576255.1401
-9.8596
-9.8596
-9.8596
-1
+0.00000000000000000000
+0.00000000000000000000
+10000.00000000000000000000
+100.00000000000000000000
+1.00000000000000000000
+0.01000000000000000000
+0.00010000000000000000
+40000.00000000000000000000
+400.00000000000000000000
+4.00000000000000000000
+0.00000000000000000000
+0.04000000000000000000
+0.00040000000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+1.00000000000000000000
+4.00000000000000000000
+9.85960000000000000000
+1.25440000000000000000
+1.25440000000000000000
+1.25888400000000000000
+1.25440000000000000000
+1.25888400000000000000
+15376.00000000000000000000
+15675.04000000000000000000
+1576255.14010000000000000000
+9.85960000000000000000
+9.85960000000000000000
+9.85960000000000000000
+1.00000000000000000000
 NULL
 NULL
 PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0
@@ -849,29 +849,29 @@ POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-100	100
-10	10
-1	1
-200	200
-20	20
-2	2
-1	1
-2	2
-3.14	3
--1.12	-1
--1.12	-1
--1.122	-11
-1.12	1
-1.122	1
-124	124
-125.2	125
--1255.49	-1255
-3.14	3
-3.14	3
-3.14	4
-1	1
--1234567890.123456789	-1234567890
-1234567890.12345678	1234567890
+100.0000000000	100
+10.0000000000	10
+1.0000000000	1
+200.0000000000	200
+20.0000000000	20
+2.0000000000	2
+1.0000000000	1
+2.0000000000	2
+3.1400000000	3
+-1.1200000000	-1
+-1.1200000000	-1
+-1.1220000000	-11
+1.1200000000	1
+1.1220000000	1
+124.0000000000	124
+125.2000000000	125
+-1255.4900000000	-1255
+3.1400000000	3
+3.1400000000	3
+3.1400000000	4
+1.0000000000	1
+-1234567890.1234567890	-1234567890
+1234567890.1234567800	1234567890
 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
@@ -917,44 +917,44 @@ POSTHOOK: query: SELECT key * value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--19360000
+-19360000.0000000000
 NULL
-0
-0
-10000
-100
-1
-0
-0
-40000
-400
-4
-0
-0
-0
-0
-0
-0
-0
-0
-0
-1
-4
-9.42
-1.12
-1.12
-12.342
-1.12
-1.122
-15376
-15650
-1575639.95
-9.42
-9.42
-12.56
-1
-1524157875171467887.50190521
-1524157875171467876.3907942
+0.0000000000
+0.0000000000
+10000.0000000000
+100.0000000000
+1.0000000000
+0.0000000000
+0.0000000000
+40000.0000000000
+400.0000000000
+4.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+1.0000000000
+4.0000000000
+9.4200000000
+1.1200000000
+1.1200000000
+12.3420000000
+1.1200000000
+1.1220000000
+15376.0000000000
+15650.0000000000
+1575639.9500000000
+9.4200000000
+9.4200000000
+12.5600000000
+1.0000000000
+1524157875171467887.5019052100
+1524157875171467876.3907942000
 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
@@ -1268,40 +1268,40 @@ POSTHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
 PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
@@ -1350,30 +1350,30 @@ POSTHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1
-1
-1
-1
-1
-1
-1
-1
-1
+-1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
 1.046666666666666666667
-1.12
-1.12
-0.102
-1.12
-1.122
-1
-1.0016
+1.120000000000000000000
+1.120000000000000000000
+0.102000000000000000000
+1.120000000000000000000
+1.122000000000000000000
+1.000000000000000000000
+1.001600000000000000000
 1.000390438247011952191
 1.046666666666666666667
 1.046666666666666666667
-0.785
-1
-1.0000000001
-1.00000000009999999271
+0.785000000000000000000
+1.000000000000000000000
+1.000000000100000000000
+1.000000000099999992710
 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
@@ -1576,44 +1576,44 @@ POSTHOOK: query: SELECT abs(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
-0.3
-0.33
-0.333
-1
-2
-3.14
-1.12
-1.12
-1.122
-1.12
-1.122
-124
-125.2
-1255.49
-3.14
-3.14
-3.14
-1
-1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- avg
 EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
@@ -1700,23 +1700,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789	-1234567890.123456789	-1234567890.123456789
--1255	-1255.49	-1255.49	-1255.49
--11	-1.122	-1.122	-1.122
--1	-1.12	-1.12	-2.24
-0	0.02538461538461538461538	0.02538461538462	0.33
-1	1.0484	1.0484	5.242
-2	2	2	4
-3	3.14	3.14	9.42
-4	3.14	3.14	3.14
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125	125.2	125.2	125.2
-200	200	200	200
-4400	-4400	-4400	-4400
-1234567890	1234567890.12345678	1234567890.12345678	1234567890.12345678
+-1234567890	-1234567890.12345678900000000000000	-1234567890.12345678900000	-1234567890.1234567890
+-1255	-1255.49000000000000000000000	-1255.49000000000000	-1255.4900000000
+-11	-1.12200000000000000000000	-1.12200000000000	-1.1220000000
+-1	-1.12000000000000000000000	-1.12000000000000	-2.2400000000
+0	0.02538461538461538461538	0.02538461538462	0.3300000000
+1	1.04840000000000000000000	1.04840000000000	5.2420000000
+2	2.00000000000000000000000	2.00000000000000	4.0000000000
+3	3.14000000000000000000000	3.14000000000000	9.4200000000
+4	3.14000000000000000000000	3.14000000000000	3.1400000000
+10	10.00000000000000000000000	10.00000000000000	10.0000000000
+20	20.00000000000000000000000	20.00000000000000	20.0000000000
+100	100.00000000000000000000000	100.00000000000000	100.0000000000
+124	124.00000000000000000000000	124.00000000000000	124.0000000000
+125	125.20000000000000000000000	125.20000000000000	125.2000000000
+200	200.00000000000000000000000	200.00000000000000	200.0000000000
+4400	-4400.00000000000000000000000	-4400.00000000000000	-4400.0000000000
+1234567890	1234567890.12345678000000000000000	1234567890.12345678000000	1234567890.1234567800
 PREHOOK: query: -- negative
 EXPLAIN SELECT -key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1764,44 +1764,44 @@ POSTHOOK: query: SELECT -key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
--100
--10
--1
--0.1
--0.01
--200
--20
--2
-0
--0.2
--0.02
--0.3
--0.33
--0.333
-0.3
-0.33
-0.333
--1
--2
--3.14
-1.12
-1.12
-1.122
--1.12
--1.122
--124
--125.2
-1255.49
--3.14
--3.14
--3.14
--1
-1234567890.123456789
--1234567890.12345678
+0.0000000000
+0.0000000000
+-100.0000000000
+-10.0000000000
+-1.0000000000
+-0.1000000000
+-0.0100000000
+-200.0000000000
+-20.0000000000
+-2.0000000000
+0.0000000000
+-0.2000000000
+-0.0200000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+-1.0000000000
+-2.0000000000
+-3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+-1.1200000000
+-1.1220000000
+-124.0000000000
+-125.2000000000
+1255.4900000000
+-3.1400000000
+-3.1400000000
+-3.1400000000
+-1.0000000000
+1234567890.1234567890
+-1234567890.1234567800
 PREHOOK: query: -- positive
 EXPLAIN SELECT +key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1831,44 +1831,44 @@ POSTHOOK: query: SELECT +key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-1
-2
-3.14
--1.12
--1.12
--1.122
-1.12
-1.122
-124
-125.2
--1255.49
-3.14
-3.14
-3.14
-1
--1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+-1.1200000000
+-1.1200000000
+-1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+-1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+-1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- ceiling
 EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2086,42 +2086,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.00
 NULL
-0
-0
-100
-10
-1
-0.1
+0.00
+0.00
+100.00
+10.00
+1.00
+0.10
 0.01
-200
-20
-2
-0
-0.2
+200.00
+20.00
+2.00
+0.00
+0.20
 0.02
-0.3
+0.30
 0.33
 0.33
--0.3
+-0.30
 -0.33
 -0.33
-1
-2
+1.00
+2.00
 3.14
 -1.12
 -1.12
 -1.12
 1.12
 1.12
-124
-125.2
+124.00
+125.20
 -1255.49
 3.14
 3.14
 3.14
-1
+1.00
 -1234567890.12
 1234567890.12
 PREHOOK: query: -- power
@@ -2255,44 +2255,44 @@ POSTHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--2199
+-2199.000000000000
 NULL
 NULL
 NULL
-1
-1
-0
-0
-0
-1
-1
-0
+1.000000000000
+1.000000000000
+0.000000000000
+0.000000000000
+0.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
 NULL
-0
-0
-0.1
-0.01
-0.001
-0.1
-0.01
-0.001
-0
-0
-1
--0.12
--0.12
--0.122
-0.44
-0.439
-1
-1
--626.745
-1
-1
-1
-0
--617283944.0617283945
-1
+0.000000000000
+0.000000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.000000000000
+0.000000000000
+1.000000000000
+-0.120000000000
+-0.120000000000
+-0.122000000000
+0.440000000000
+0.439000000000
+1.000000000000
+1.000000000000
+-626.745000000000
+1.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
+-617283944.061728394500
+1.000000000000
 PREHOOK: query: -- stddev, var
 EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value
 PREHOOK: type: QUERY
@@ -2596,7 +2596,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890.123456789
+-1234567890.1234567890
 PREHOOK: query: -- max
 EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2663,7 +2663,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1234567890.12345678
+1234567890.1234567800
 PREHOOK: query: -- count
 EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
index 1cd5959..337d83f 100644
--- a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
@@ -111,56 +111,56 @@ LIMIT 50
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_test
 #### A masked pattern was here ####
--1073051226	-7382.0	-4409.2486486486	-5280.969230769231	-4409.2486486486
--1072081801	8373.0	5001.1702702703	5989.915384615385	5001.1702702703
--1072076362	-5470.0	-3267.2162162162	-3913.1538461538466	-3267.2162162162
--1070883071	-741.0	-442.5972972973	-530.1	-442.5972972973
--1070551679	-947.0	-565.6405405405	-677.4692307692308	-565.6405405405
--1069512165	11417.0	6819.3432432432	8167.546153846154	6819.3432432432
--1069109166	8390.0	5011.3243243243	6002.076923076923	5011.3243243243
--1068623584	-14005.0	-8365.1486486486	-10018.961538461539	-8365.1486486486
--1067386090	-3977.0	-2375.4513513514	-2845.084615384616	-2375.4513513514
--1066922682	-9987.0	-5965.2081081081	-7144.546153846154	-5965.2081081081
--1066226047	-9439.0	-5637.8891891892	-6752.515384615385	-5637.8891891892
--1065117869	2538.0	1515.9405405405	1815.646153846154	1515.9405405405
--1064949302	6454.0	3854.9567567568	4617.092307692308	3854.9567567568
--1063498122	-11480.0	-6856.972972973	-8212.615384615387	-6856.972972973
--1062973443	10541.0	6296.1108108108	7540.869230769231	6296.1108108108
--1061614989	-4234.0	-2528.9567567568	-3028.938461538462	-2528.9567567568
--1061057428	-1085.0	-648.0675675676	-776.1923076923077	-648.0675675676
--1059941909	8782.0	5245.4648648649	6282.507692307693	5245.4648648649
--1059338191	7322.0	4373.4108108108	5238.046153846154	4373.4108108108
--1059047258	12452.0	7437.5459459459	8907.969230769231	7437.5459459459
--1056684111	13991.0	8356.7864864865	10008.946153846155	8356.7864864865
--1055945837	13690.0	8177	9793.615384615387	8177
--1055669248	2570.0	1535.0540540541	1838.538461538462	1535.0540540541
--1055316250	-14990.0	-8953.4864864865	-10723.615384615385	-8953.4864864865
--1053385587	14504.0	8663.2	10375.938461538462	8663.2
--1053238077	-3704.0	-2212.3891891892	-2649.784615384616	-2212.3891891892
--1052745800	-12404.0	-7408.8756756757	-8873.630769230771	-7408.8756756757
--1052322972	-7433.0	-4439.7108108108	-5317.453846153847	-4439.7108108108
--1050684541	-8261.0	-4934.272972973	-5909.792307692308	-4934.272972973
--1050657303	-6999.0	-4180.4837837838	-5006.976923076923	-4180.4837837838
--1050165799	8634.0	5157.0648648649	6176.63076923077	5157.0648648649
+-1073051226	-7382.0	-4409.2486486486	-5280.96923076923100	-4409.2486486486
+-1072081801	8373.0	5001.1702702703	5989.91538461538500	5001.1702702703
+-1072076362	-5470.0	-3267.2162162162	-3913.15384615384660	-3267.2162162162
+-1070883071	-741.0	-442.5972972973	-530.10000000000000	-442.5972972973
+-1070551679	-947.0	-565.6405405405	-677.46923076923080	-565.6405405405
+-1069512165	11417.0	6819.3432432432	8167.54615384615400	6819.3432432432
+-1069109166	8390.0	5011.3243243243	6002.07692307692300	5011.3243243243
+-1068623584	-14005.0	-8365.1486486486	-10018.96153846153900	-8365.1486486486
+-1067386090	-3977.0	-2375.4513513514	-2845.08461538461600	-2375.4513513514
+-1066922682	-9987.0	-5965.2081081081	-7144.54615384615400	-5965.2081081081
+-1066226047	-9439.0	-5637.8891891892	-6752.51538461538500	-5637.8891891892
+-1065117869	2538.0	1515.9405405405	1815.64615384615400	1515.9405405405
+-1064949302	6454.0	3854.9567567568	4617.09230769230800	3854.9567567568
+-1063498122	-11480.0	-6856.9729729730	-8212.61538461538700	-6856.9729729730
+-1062973443	10541.0	6296.1108108108	7540.86923076923100	6296.1108108108
+-1061614989	-4234.0	-2528.9567567568	-3028.93846153846200	-2528.9567567568
+-1061057428	-1085.0	-648.0675675676	-776.19230769230770	-648.0675675676
+-1059941909	8782.0	5245.4648648649	6282.50769230769300	5245.4648648649
+-1059338191	7322.0	4373.4108108108	5238.04615384615400	4373.4108108108
+-1059047258	12452.0	7437.5459459459	8907.96923076923100	7437.5459459459
+-1056684111	13991.0	8356.7864864865	10008.94615384615500	8356.7864864865
+-1055945837	13690.0	8177.0000000000	9793.61538461538700	8177.0000000000
+-1055669248	2570.0	1535.0540540541	1838.53846153846200	1535.0540540541
+-1055316250	-14990.0	-8953.4864864865	-10723.61538461538500	-8953.4864864865
+-1053385587	14504.0	8663.2000000000	10375.93846153846200	8663.2000000000
+-1053238077	-3704.0	-2212.3891891892	-2649.78461538461600	-2212.3891891892
+-1052745800	-12404.0	-7408.8756756757	-8873.63076923077100	-7408.8756756757
+-1052322972	-7433.0	-4439.7108108108	-5317.45384615384700	-4439.7108108108
+-1050684541	-8261.0	-4934.2729729730	-5909.79230769230800	-4934.2729729730
+-1050657303	-6999.0	-4180.4837837838	-5006.97692307692300	-4180.4837837838
+-1050165799	8634.0	5157.0648648649	6176.63076923077000	5157.0648648649
 -1048934049	-524.0	-312.9837837838	-374.86153846153854	-312.9837837838
--1046399794	4130.0	2466.8378378378	2954.5384615384614	2466.8378378378
--1045867222	-8034.0	-4798.6864864865	-5747.400000000001	-4798.6864864865
--1045196363	-5039.0	-3009.7810810811	-3604.823076923077	-3009.7810810811
--1045181724	-5706.0	-3408.1783783784	-4081.9846153846156	-3408.1783783784
--1045087657	-5865.0	-3503.1486486486	-4195.7307692307695	-3503.1486486486
--1044207190	5381.0	3214.0567567568	3849.4846153846156	3214.0567567568
--1044093617	-3422.0	-2043.9513513514	-2448.046153846154	-2043.9513513514
--1043573508	16216.0	9685.772972973	11600.676923076924	9685.772972973
--1043132597	12302.0	7347.9513513514	8800.66153846154	7347.9513513514
--1043082182	9180.0	5483.1891891892	6567.2307692307695	5483.1891891892
--1042805968	5133.0	3065.927027027	3672.0692307692307	3065.927027027
--1042712895	9296.0	5552.4756756757	6650.215384615385	5552.4756756757
--1042396242	9583.0	5723.9	6855.53076923077	5723.9
--1041734429	-836.0	-499.3405405405	-598.0615384615385	-499.3405405405
--1041391389	-12970.0	-7746.9459459459	-9278.538461538463	-7746.9459459459
--1041252354	756.0	451.5567567568	540.8307692307692	451.5567567568
--1039776293	13704.0	8185.3621621622	9803.630769230771	8185.3621621622
--1039762548	-3802.0	-2270.9243243243	-2719.8923076923083	-2270.9243243243
+-1046399794	4130.0	2466.8378378378	2954.53846153846140	2466.8378378378
+-1045867222	-8034.0	-4798.6864864865	-5747.40000000000100	-4798.6864864865
+-1045196363	-5039.0	-3009.7810810811	-3604.82307692307700	-3009.7810810811
+-1045181724	-5706.0	-3408.1783783784	-4081.98461538461560	-3408.1783783784
+-1045087657	-5865.0	-3503.1486486486	-4195.73076923076950	-3503.1486486486
+-1044207190	5381.0	3214.0567567568	3849.48461538461560	3214.0567567568
+-1044093617	-3422.0	-2043.9513513514	-2448.04615384615400	-2043.9513513514
+-1043573508	16216.0	9685.7729729730	11600.67692307692400	9685.7729729730
+-1043132597	12302.0	7347.9513513514	8800.66153846154000	7347.9513513514
+-1043082182	9180.0	5483.1891891892	6567.23076923076950	5483.1891891892
+-1042805968	5133.0	3065.9270270270	3672.06923076923070	3065.9270270270
+-1042712895	9296.0	5552.4756756757	6650.21538461538500	5552.4756756757
+-1042396242	9583.0	5723.9000000000	6855.53076923077000	5723.9000000000
+-1041734429	-836.0	-499.3405405405	-598.06153846153850	-499.3405405405
+-1041391389	-12970.0	-7746.9459459459	-9278.53846153846300	-7746.9459459459
+-1041252354	756.0	451.5567567568	540.83076923076920	451.5567567568
+-1039776293	13704.0	8185.3621621622	9803.63076923077100	8185.3621621622
+-1039762548	-3802.0	-2270.9243243243	-2719.89230769230830	-2270.9243243243
 PREHOOK: query: SELECT sum(hash(*))
   FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
         WHERE cdecimal1 is not null and cdecimal2 is not null

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/update_all_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/update_all_types.q.out b/ql/src/test/results/clientpositive/update_all_types.q.out
index 1cfa088..c5c1abb 100644
--- a/ql/src/test/results/clientpositive/update_all_types.q.out
+++ b/ql/src/test/results/clientpositive/update_all_types.q.out
@@ -96,11 +96,11 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+-51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-741	-1070883071	-1070883071	-1645852809	NULL	-741.0	NULL	NULL	1969-12-31	0ruyd6Y50JpdGRf6HqD	0ruyd6Y50JpdGRf6HqD	xH7445Rals48VOulSyR5F               	false
@@ -150,12 +150,12 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+-51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
 1	2	-1070883071	3	4	3.14	6.28	5.99	NULL	2014-09-01	its a beautiful day in the neighbhorhood	a beautiful day for a neighbor	wont you be mine                    	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-947	-1070551679	-1070551679	1864027286	NULL	-947.0	NULL	NULL	1969-12-31	iUR3Q	iUR3Q	4KWs6gw7lv2WYd66P                   	false
@@ -184,12 +184,12 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--102	-51	-1071480828	-1071480828	-1401575336	-51.0	-51.0	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+-102	-51	-1071480828	-1071480828	-1401575336	-51.0	-51.0	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
 1	2	-1070883071	3	4	3.14	6.28	5.99	NULL	2014-09-01	its a beautiful day in the neighbhorhood	a beautiful day for a neighbor	wont you be mine                    	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-947	-1070551679	-1070551679	1864027286	NULL	-947.0	NULL	NULL	1969-12-31	iUR3Q	iUR3Q	4KWs6gw7lv2WYd66P                   	false

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
index 72dc004..e0cf903 100644
--- a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
@@ -164,4 +164,4 @@ select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2korc
 #### A masked pattern was here ####
--4997414117561.546875	4994550248722.298828	-10252745435816.02441	-5399023399.587163986308583465
+-4997414117561.546875000000000000	4994550248722.298828000000000000	-10252745435816.024410000000000000	-5399023399.587163986308583465

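The vector_aggregate_9 result above shows the core effect of HIVE-12063: decimal output is now padded with trailing zeros out to the scale of the result type rather than printed in shortest form. A minimal sketch of the behavior, assuming a decimal(14,4) column (the table name and rows here are invented for illustration and are not part of this patch):

-- hedged sketch; pad_demo and its rows are illustrative only
CREATE TABLE pad_demo (d DECIMAL(14,4)) STORED AS ORC;
INSERT INTO TABLE pad_demo VALUES (1), (3.14), (125.2);
SELECT d FROM pad_demo;
-- before this patch: 1        3.14     125.2
-- with padding:      1.0000   3.1400   125.2000
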
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_between_in.q.out b/ql/src/test/results/clientpositive/vector_between_in.q.out
index a9b9a4b..b80da1b 100644
--- a/ql/src/test/results/clientpositive/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/vector_between_in.q.out
@@ -594,34 +594,34 @@ POSTHOOK: Input: default@decimal_date_test
 -18.5162162162
 -17.3216216216
 -16.7243243243
--16.127027027
+-16.1270270270
 -15.5297297297
 -10.7513513514
 -9.5567567568
 -8.3621621622
--5.972972973
+-5.9729729730
 -3.5837837838
 4.1810810811
 4.7783783784
 4.7783783784
 5.3756756757
-5.972972973
-5.972972973
+5.9729729730
+5.9729729730
 11.3486486486
 11.3486486486
 11.9459459459
 14.9324324324
 19.1135135135
 20.3081081081
-22.1
+22.1000000000
 24.4891891892
 33.4486486486
 34.6432432432
 40.0189189189
 42.4081081081
 43.0054054054
-44.2
-44.2
+44.2000000000
+44.2000000000
 44.7972972973
 45.9918918919
 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
index 9edd6f1..e5d56ec 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
@@ -207,13 +207,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out
index 3ed833b..86f1677 100644
--- a/ql/src/test/results/clientpositive/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/vector_data_types.q.out
@@ -153,7 +153,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education
@@ -239,7 +239,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_2.q.out
index 8a4d53a..ff82f38 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_2.q.out
@@ -1051,7 +1051,7 @@ POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1355944339.1234567
+1355944339.12345670
 PREHOOK: query: explain
 select cast(true as decimal) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1406,7 +1406,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1
+1.0000000000000000000
 PREHOOK: query: explain
 select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c
 PREHOOK: type: QUERY

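The vector_decimal_2 changes above combine rounding with the new padding: the literal 0.99999999999999999999 carries twenty fractional digits, so the cast to decimal(20,19) rounds it up to one, and the result is then printed padded to scale 19. A standalone sketch of the same behavior (hedged; not tied to the decimal_2 table):

SELECT CAST(0.99999999999999999999 AS DECIMAL(20,19));
-- rounds up at scale 19 and now prints as 1.0000000000000000000
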
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/vector_decimal_3.q.out
index 75f872e..eea91bb 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_3.q.out
@@ -47,43 +47,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -92,43 +92,43 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-1234567890.12345678	1234567890
-200	200
-125.2	125
-124	124
-100	100
-20	20
-10	10
-3.14	4
-3.14	3
-3.14	3
-3.14	3
-2	2
-2	2
-1.122	1
-1.12	1
-1	1
-1	1
-1	1
-0.333	0
-0.33	0
-0.3	0
-0.2	0
-0.1	0
-0.02	0
-0.01	0
-0	0
-0	0
-0	0
--0.3	0
--0.33	0
--0.333	0
--1.12	-1
--1.12	-1
--1.122	-11
--1255.49	-1255
--4400	4400
--1234567890.123456789	-1234567890
+1234567890.123456780000000000	1234567890
+200.000000000000000000	200
+125.200000000000000000	125
+124.000000000000000000	124
+100.000000000000000000	100
+20.000000000000000000	20
+10.000000000000000000	10
+3.140000000000000000	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+2.000000000000000000	2
+2.000000000000000000	2
+1.122000000000000000	1
+1.120000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+0.333000000000000000	0
+0.330000000000000000	0
+0.300000000000000000	0
+0.200000000000000000	0
+0.100000000000000000	0
+0.020000000000000000	0
+0.010000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+-0.300000000000000000	0
+-0.330000000000000000	0
+-0.333000000000000000	0
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-1.122000000000000000	-11
+-1255.490000000000000000	-1255
+-4400.000000000000000000	4400
+-1234567890.123456789000000000	-1234567890
 NULL	0
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -139,43 +139,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -185,34 +185,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL
--1234567890.123456789
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
-1234567890.12345678
+-1234567890.123456789000000000
+-4400.000000000000000000
+-1255.490000000000000000
+-1.122000000000000000
+-1.120000000000000000
+-0.333000000000000000
+-0.330000000000000000
+-0.300000000000000000
+0.000000000000000000
+0.010000000000000000
+0.020000000000000000
+0.100000000000000000
+0.200000000000000000
+0.300000000000000000
+0.330000000000000000
+0.333000000000000000
+1.000000000000000000
+1.120000000000000000
+1.122000000000000000
+2.000000000000000000
+3.140000000000000000
+10.000000000000000000
+20.000000000000000000
+100.000000000000000000
+124.000000000000000000
+125.200000000000000000
+200.000000000000000000
+1234567890.123456780000000000
 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -222,34 +222,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-2
--0.333	0
--0.33	0
--0.3	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	3
-1.12	1
-1.122	1
-2	4
-3.14	13
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-2
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	3
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	4
+3.140000000000000000	13
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -258,23 +258,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789
--1255	-1255.49
--11	-1.122
--1	-2.24
-0	0.33
-1	5.242
-2	4
-3	9.42
-4	3.14
-10	10
-20	20
-100	100
-124	124
-125	125.2
-200	200
-4400	-4400
-1234567890	1234567890.12345678
+-1234567890	-1234567890.123456789000000000
+-1255	-1255.490000000000000000
+-11	-1.122000000000000000
+-1	-2.240000000000000000
+0	0.330000000000000000
+1	5.242000000000000000
+2	4.000000000000000000
+3	9.420000000000000000
+4	3.140000000000000000
+10	10.000000000000000000
+20	20.000000000000000000
+100	100.000000000000000000
+124	124.000000000000000000
+125	125.200000000000000000
+200	200.000000000000000000
+4400	-4400.000000000000000000
+1234567890	1234567890.123456780000000000
 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -283,71 +283,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -356,10 +356,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -368,10 +368,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: DROP TABLE DECIMAL_3_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_3_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_4.q.out b/ql/src/test/results/clientpositive/vector_decimal_4.q.out
index 613f5a8..c7d3d9e 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_4.q.out
@@ -57,43 +57,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_1
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
+-1234567890.1234567890000000000000000	-1234567890
+-4400.0000000000000000000000000	4400
+-1255.4900000000000000000000000	-1255
+-1.1220000000000000000000000	-11
+-1.1200000000000000000000000	-1
+-1.1200000000000000000000000	-1
+-0.3330000000000000000000000	0
+-0.3300000000000000000000000	0
+-0.3000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0100000000000000000000000	0
+0.0200000000000000000000000	0
+0.1000000000000000000000000	0
+0.2000000000000000000000000	0
+0.3000000000000000000000000	0
+0.3300000000000000000000000	0
+0.3330000000000000000000000	0
 0.9999999999999999999999999	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+1.0000000000000000000000000	1
+1.0000000000000000000000000	1
+1.1200000000000000000000000	1
+1.1220000000000000000000000	1
+2.0000000000000000000000000	2
+2.0000000000000000000000000	2
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	4
+10.0000000000000000000000000	10
+20.0000000000000000000000000	20
+100.0000000000000000000000000	100
+124.0000000000000000000000000	124
+125.2000000000000000000000000	125
+200.0000000000000000000000000	200
+1234567890.1234567800000000000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -103,43 +103,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -149,43 +149,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -195,43 +195,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: DROP TABLE DECIMAL_4_1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_4_1

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/vector_decimal_5.q.out
index 34c3351..0bfd12e 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_5.q.out
@@ -59,41 +59,41 @@ POSTHOOK: Input: default@decimal_5
 NULL
 NULL
 NULL
--4400
--1255.49
--1.122
--1.12
--1.12
--0.333
--0.33
--0.3
-0
-0
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1
-1
-1.12
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.00000
+1.00000
+1.12000
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -103,32 +103,32 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.12000
+1.12200
+2.00000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -185,40 +185,40 @@ POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
+0.000
+0.000
+100.000
+10.000
+1.000
+0.100
+0.010
+200.000
+20.000
+2.000
+0.000
+0.200
+0.020
+0.300
+0.330
 0.333
--0.3
--0.33
+-0.300
+-0.330
 -0.333
-1
-2
-3.14
--1.12
--1.12
+1.000
+2.000
+3.140
+-1.120
+-1.120
 -1.122
-1.12
+1.120
 1.122
-124
-125.2
+124.000
+125.200
 NULL
-3.14
-3.14
-3.14
-1
+3.140
+3.140
+3.140
+1.000
 NULL
 NULL
 PREHOOK: query: DROP TABLE DECIMAL_5_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
index 9cdd7fc..e0ccbc6 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
@@ -119,27 +119,27 @@ NULL	0
 NULL	3
 NULL	4
 NULL	1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--0.333	0
--0.3	0
-0	0
-0	0
-0.333	0
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-3.14	3
-3.14	3
-3.14	4
-10	10
+-4400.00000	4400
+-1255.49000	-1255
+-1.12200	-11
+-1.12000	-1
+-0.33300	0
+-0.30000	0
+0.00000	0
+0.00000	0
+0.33300	0
+1.00000	1
+1.00000	1
+1.12000	1
+1.12200	1
+2.00000	2
+3.14000	3
+3.14000	3
+3.14000	4
+10.00000	10
 10.73433	5
-124	124
-125.2	125
+124.00000	124
+125.20000	125
 23232.23435	2
 PREHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -151,27 +151,27 @@ POSTHOOK: Input: default@decimal_6_2
 #### A masked pattern was here ####
 NULL	0
 -1234567890.1235	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--0.333	0
--0.3	0
-0	0
-0	0
-0.333	0
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-3.14	3
-3.14	3
-3.14	4
-10	10
+-4400.0000	4400
+-1255.4900	-1255
+-1.1220	-11
+-1.1200	-1
+-0.3330	0
+-0.3000	0
+0.0000	0
+0.0000	0
+0.3330	0
+1.0000	1
+1.0000	1
+1.1200	1
+1.1220	1
+2.0000	2
+3.1400	3
+3.1400	3
+3.1400	4
+10.0000	10
 10.7343	5
-124	124
-125.2	125
+124.0000	124
+125.2000	125
 23232.2344	2
 2389432.2375	3
 2389432.2375	4
@@ -200,54 +200,54 @@ NULL
 NULL
 NULL
 NULL
--1234567890.1235
--4400
--4400
--1255.49
--1255.49
--1.122
--1.122
--1.12
--1.12
--0.333
--0.333
--0.3
--0.3
-0
-0
-0
-0
-0.333
-0.333
-1
-1
-1
-1
-1.12
-1.12
-1.122
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-3.14
-3.14
-10
-10
-10.7343
+-1234567890.12350
+-4400.00000
+-4400.00000
+-1255.49000
+-1255.49000
+-1.12200
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33300
+-0.30000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.00000
+0.33300
+0.33300
+1.00000
+1.00000
+1.00000
+1.00000
+1.12000
+1.12000
+1.12200
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+10.00000
+10.73430
 10.73433
-124
-124
-125.2
-125.2
+124.00000
+124.00000
+125.20000
+125.20000
 23232.23435
-23232.2344
-2389432.2375
-2389432.2375
-1234567890.1235
+23232.23440
+2389432.23750
+2389432.23750
+1234567890.12350
 PREHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@decimal_6_1
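[Editor's illustration] The expected-output changes above all move the same way: each decimal value is now printed with trailing zeros padded out to the scale of its column (five fractional digits for DECIMAL_5's key, four for DECIMAL_6_2), instead of the shortest representation. A minimal Java sketch of that padding rule, illustrative only and not Hive's actual HiveDecimal formatting code:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalPadSketch {
      // Render a decimal value with trailing zeros out to the column's scale.
      static String pad(BigDecimal value, int columnScale) {
        // Raising the scale only appends zeros, so UNNECESSARY cannot throw
        // as long as the value already fits the column's declared scale.
        return value.setScale(columnScale, RoundingMode.UNNECESSARY).toPlainString();
      }

      public static void main(String[] args) {
        System.out.println(pad(new BigDecimal("3.14"), 5));   // 3.14000
        System.out.println(pad(new BigDecimal("-4400"), 5));  // -4400.00000
        System.out.println(pad(new BigDecimal("125.2"), 4));  // 125.2000
      }
    }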


[10/55] [abbrv] hive git commit: HIVE-12317: Emit current database in lineage info (Jimmy, reviewed by Yongzhi)

Posted by xu...@apache.org.
HIVE-12317: Emit current database in lineage info (Jimmy, reviewed by Yongzhi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/92620d8e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/92620d8e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/92620d8e

Branch: refs/heads/spark
Commit: 92620d8e3275231eadf6ee87b7a937860339f92d
Parents: 16a86b2
Author: Jimmy Xiang <jx...@apache.org>
Authored: Mon Nov 2 13:43:56 2015 -0800
Committer: Jimmy Xiang <jx...@apache.org>
Committed: Wed Nov 4 07:24:48 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/hooks/LineageLogger.java     |  1 +
 .../clientpositive/cbo_rp_lineage2.q.out        | 68 +++++++++---------
 .../test/results/clientpositive/lineage2.q.out  | 72 ++++++++++----------
 .../test/results/clientpositive/lineage3.q.out  | 60 ++++++++--------
 4 files changed, 101 insertions(+), 100 deletions(-)
----------------------------------------------------------------------
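[Editor's illustration] The functional change is a single line: LineageLogger now writes the session's current database between the "engine" and "hash" members of every lineage record, which is exactly the shift visible in the .q.out diffs below. A hedged sketch of the resulting header, assuming the gson streaming JsonWriter that the surrounding writer.name(...).value(...) calls suggest; the literal values here stand in for what the session would actually report:

    import com.google.gson.stream.JsonWriter;
    import java.io.StringWriter;

    public class LineageHeaderSketch {
      public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        JsonWriter writer = new JsonWriter(out);
        writer.beginObject();
        writer.name("version").value("1.0");
        writer.name("engine").value("mr");
        // The member added by this patch; "default" stands in for
        // ss.getCurrentDatabase() in the hook.
        writer.name("database").value("default");
        writer.name("hash").value("3a39d46286e4c2cd2139c9bb248f7b4f");
        writer.name("queryText").value("create table src2 as select key key2, value value2 from src1");
        writer.endObject();
        writer.close();
        System.out.println(out);
        // {"version":"1.0","engine":"mr","database":"default","hash":"...","queryText":"..."}
      }
    }

Every golden-file line in the diffs that follow simply gains the "database":"default" member in that position.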


http://git-wip-us.apache.org/repos/asf/hive/blob/92620d8e/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index 1146cae..178a2de 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -169,6 +169,7 @@ public class LineageLogger implements ExecuteWithHookContext {
         }
         writer.name("engine").value(
           HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE));
+        writer.name("database").value(ss.getCurrentDatabase());
         writer.name("hash").value(getQueryHash(queryStr));
         writer.name("queryText").value(queryStr);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/92620d8e/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
index 9fc1e7b..41f3d09 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
@@ -5,12 +5,12 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@src2
-{"version":"1.0","engine":"mr","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 PREHOOK: query: select * from src1 where key is not null and value is not null limit 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[],"vertices":[]}
 238	val_238
 	
 311	val_311
@@ -18,7 +18,7 @@ PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 150	val_150
 213	val_213
@@ -31,17 +31,17 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 PREHOOK: query: insert into table dest1 select * from src2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: select key k, dest1.value from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 238	val_238
 	
 311	val_311
@@ -97,7 +97,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
 
 128
 146
@@ -119,7 +119,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
 
 128
 146
@@ -140,7 +140,7 @@ PREHOOK: query: select key, count(1) a from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[],"vertices":[]}
 	20
 128	2
 146	2
@@ -161,7 +161,7 @@ PREHOOK: query: select key k, count(*) from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[],"vertices":[]}
 	20
 128	2
 146	2
@@ -182,7 +182,7 @@ PREHOOK: query: select key k, count(value) from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[],"vertices":[]}
 	20
 128	2
 146	2
@@ -203,7 +203,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[],"vertices":[]}
 	3
 val_146	3
 val_150	3
@@ -227,7 +227,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value order b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
 	3
 val_146	3
 val_150	3
@@ -237,7 +237,7 @@ PREHOOK: query: select key, length(value) from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 238	7
 	0
 311	7
@@ -292,7 +292,7 @@ PREHOOK: query: select length(value) + 3 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 10
 3
 10
@@ -347,7 +347,7 @@ PREHOOK: query: select 5 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]}
 5
 5
 5
@@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]}
 15
 15
 15
@@ -461,31 +461,31 @@ PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert into table dest2
   select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: select * from src1 where length(key) > 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[],"vertices":[]}
 238	val_238
 311	val_311
 255	val_255
@@ -503,7 +503,7 @@ PREHOOK: query: select * from src1 where length(key) > 2 and value > 'a'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[],"vertices":[]}
 238	val_238
 311	val_311
 255	val_255
@@ -523,14 +523,14 @@ PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest3
-{"version":"1.0","engine":"mr","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert overwrite table dest2
   select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: drop table if exists dest_l1
 PREHOOK: type: DROPTABLE
 PREHOOK: query: CREATE TABLE dest_l1(key INT, value STRING) STORED AS TEXTFILE
@@ -552,7 +552,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
 PREHOOK: Output: default@dest_l1
-{"version":"1.0","engine":"mr","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n      FROM src1 t1\n      LEFT OUTER JOIN src p1\n      ON (t1.key = p1.key)\n      UNION ALL\n      SELECT t2.key, p2.value\n      FROM src1 t2\n      LEFT OUTER JOIN src p2\n      ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(null-subquery1:j-subquery1:p1.key = null-subquery1:j-subquery1:t1.key)","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(null-subquery2:j-subquery2:p2.key = null-subquery2:j-subquery2:t2.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n      FROM src1 t1\n      LEFT OUTER JOIN src p1\n      ON (t1.key = p1.key)\n      UNION ALL\n      SELECT t2.key, p2.value\n      FROM src1 t2\n      LEFT OUTER JOIN src p2\n      ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(null-subquery1:j-subquery1:p1.key = null-subquery1:j-subquery1:t1.key)","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(null-subquery2:j-subquery2:p2.key = null-subquery2:j-subquery2:t2.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
 PREHOOK: query: drop table if exists emp
 PREHOOK: type: DROPTABLE
 PREHOOK: query: drop table if exists dept
@@ -593,7 +593,7 @@ PREHOOK: Input: default@dept
 PREHOOK: Input: default@emp
 PREHOOK: Input: default@project
 PREHOOK: Output: default@tgt
-{"version":"1.0","engine":"mr","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n  SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n  FROM (\n    SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n    FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n    ) em\n  JOIN dept d ON d.dept_id = em.dept_id\n  ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"edgeType":"PROJECTION"},{"sources":[8],"targets":[2,3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PREDICATE"},{"sources":[11,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n  SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n  FROM (\n    SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n    FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n    ) em\n  JOIN dept d ON d.dept_id = em.dept_id\n  ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"edgeType":"PROJECTION"},{"sources":[8],"targets":[2,3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PREDICATE"},{"sources":[11,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
 PREHOOK: query: drop table if exists dest_l2
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile
@@ -604,7 +604,7 @@ PREHOOK: query: insert into dest_l2 values(0, 1, 100, 10000)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@dest_l2
-{"version":"1.0","engine":"mr","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
 PREHOOK: query: select * from (
   select c1 + c2 x from dest_l2
   union all
@@ -612,7 +612,7 @@ PREHOOK: query: select * from (
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n  select c1 + c2 x from dest_l2\n  union all\n  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n  select c1 + c2 x from dest_l2\n  union all\n  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
 101
 10000
 PREHOOK: query: drop table if exists dest_l3
@@ -625,7 +625,7 @@ PREHOOK: query: insert into dest_l3 values(0, "s1", "s2", 15)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__2
 PREHOOK: Output: default@dest_l3
-{"version":"1.0","engine":"mr","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
 PREHOOK: query: select sum(a.c1) over (partition by a.c1 order by a.id)
 from dest_l2 a
 where a.c2 != 10
@@ -634,7 +634,7 @@ having count(a.c2) > 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"$win$_col_0","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"$win$_col_0","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"}]}
 1
 PREHOOK: query: select sum(a.c1), count(b.c1), b.c2, b.c3
 from dest_l2 a join dest_l3 b on (a.id = b.id)
@@ -646,7 +646,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 PREHOOK: Input: default@dest_l3
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[10,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[10],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[10,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[10],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"}]}
 1	1	s2	15
 PREHOOK: query: drop table if exists t
 PREHOOK: type: DROPTABLE
@@ -659,7 +659,7 @@ PREHOOK: Input: default@dest_l2
 PREHOOK: Input: default@dest_l3
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t
-{"version":"1.0","engine":"mr","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
 PREHOOK: query: SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
 concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
 from src1
@@ -667,7 +667,7 @@ GROUP BY substr(src1.key,1,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT KEY._col1:0._col0)","edgeType":"PROJECTION"},{"sources":[3,5],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"_c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT KEY._col1:0._col0)","edgeType":"PROJECTION"},{"sources":[3,5],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"_c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 	7	1543.0
 1	3	1296.0
 2	6	21257.0


[43/55] [abbrv] hive git commit: HIVE-7575 GetTables thrift call is very slow (Navis via Aihua Xu, reviewed by Szehon Ho, Aihua Xu)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 3bc7e10..4690093 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -84,6 +84,8 @@ public class ThriftHiveMetastore {
 
     public List<String> get_tables(String db_name, String pattern) throws MetaException, org.apache.thrift.TException;
 
+    public List<TableMeta> get_table_meta(String db_patterns, String tbl_patterns, List<String> tbl_types) throws MetaException, org.apache.thrift.TException;
+
     public List<String> get_all_tables(String db_name) throws MetaException, org.apache.thrift.TException;
 
     public Table get_table(String dbname, String tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
@@ -348,6 +350,8 @@ public class ThriftHiveMetastore {
 
     public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void get_table_meta(String db_patterns, String tbl_patterns, List<String> tbl_types, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void get_all_tables(String db_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void get_table(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1204,6 +1208,34 @@ public class ThriftHiveMetastore {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_tables failed: unknown result");
     }
 
+    public List<TableMeta> get_table_meta(String db_patterns, String tbl_patterns, List<String> tbl_types) throws MetaException, org.apache.thrift.TException
+    {
+      send_get_table_meta(db_patterns, tbl_patterns, tbl_types);
+      return recv_get_table_meta();
+    }
+
+    public void send_get_table_meta(String db_patterns, String tbl_patterns, List<String> tbl_types) throws org.apache.thrift.TException
+    {
+      get_table_meta_args args = new get_table_meta_args();
+      args.setDb_patterns(db_patterns);
+      args.setTbl_patterns(tbl_patterns);
+      args.setTbl_types(tbl_types);
+      sendBase("get_table_meta", args);
+    }
+
+    public List<TableMeta> recv_get_table_meta() throws MetaException, org.apache.thrift.TException
+    {
+      get_table_meta_result result = new get_table_meta_result();
+      receiveBase(result, "get_table_meta");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_table_meta failed: unknown result");
+    }
+
     public List<String> get_all_tables(String db_name) throws MetaException, org.apache.thrift.TException
     {
       send_get_all_tables(db_name);
@@ -5131,6 +5163,44 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public void get_table_meta(String db_patterns, String tbl_patterns, List<String> tbl_types, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_table_meta_call method_call = new get_table_meta_call(db_patterns, tbl_patterns, tbl_types, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_table_meta_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String db_patterns;
+      private String tbl_patterns;
+      private List<String> tbl_types;
+      public get_table_meta_call(String db_patterns, String tbl_patterns, List<String> tbl_types, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.db_patterns = db_patterns;
+        this.tbl_patterns = tbl_patterns;
+        this.tbl_types = tbl_types;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_table_meta", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_table_meta_args args = new get_table_meta_args();
+        args.setDb_patterns(db_patterns);
+        args.setTbl_patterns(tbl_patterns);
+        args.setTbl_types(tbl_types);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public List<TableMeta> getResult() throws MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_table_meta();
+      }
+    }
+
     public void get_all_tables(String db_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       get_all_tables_call method_call = new get_all_tables_call(db_name, resultHandler, this, ___protocolFactory, ___transport);
@@ -9061,6 +9131,7 @@ public class ThriftHiveMetastore {
       processMap.put("drop_table", new drop_table());
       processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
       processMap.put("get_tables", new get_tables());
+      processMap.put("get_table_meta", new get_table_meta());
       processMap.put("get_all_tables", new get_all_tables());
       processMap.put("get_table", new get_table());
       processMap.put("get_table_objects_by_name", new get_table_objects_by_name());
@@ -9731,6 +9802,30 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public static class get_table_meta<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_table_meta_args> {
+      public get_table_meta() {
+        super("get_table_meta");
+      }
+
+      public get_table_meta_args getEmptyArgsInstance() {
+        return new get_table_meta_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_table_meta_result getResult(I iface, get_table_meta_args args) throws org.apache.thrift.TException {
+        get_table_meta_result result = new get_table_meta_result();
+        try {
+          result.success = iface.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types);
+        } catch (MetaException o1) {
+          result.o1 = o1;
+        }
+        return result;
+      }
+    }
+
     public static class get_all_tables<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_all_tables_args> {
       public get_all_tables() {
         super("get_all_tables");
@@ -12558,6 +12653,7 @@ public class ThriftHiveMetastore {
       processMap.put("drop_table", new drop_table());
       processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
       processMap.put("get_tables", new get_tables());
+      processMap.put("get_table_meta", new get_table_meta());
       processMap.put("get_all_tables", new get_all_tables());
       processMap.put("get_table", new get_table());
       processMap.put("get_table_objects_by_name", new get_table_objects_by_name());
@@ -13991,6 +14087,63 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public static class get_table_meta<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_meta_args, List<TableMeta>> {
+      public get_table_meta() {
+        super("get_table_meta");
+      }
+
+      public get_table_meta_args getEmptyArgsInstance() {
+        return new get_table_meta_args();
+      }
+
+      public AsyncMethodCallback<List<TableMeta>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<List<TableMeta>>() { 
+          public void onComplete(List<TableMeta> o) {
+            get_table_meta_result result = new get_table_meta_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            get_table_meta_result result = new get_table_meta_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, get_table_meta_args args, org.apache.thrift.async.AsyncMethodCallback<List<TableMeta>> resultHandler) throws TException {
+        iface.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types,resultHandler);
+      }
+    }
+
     public static class get_all_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_all_tables_args, List<String>> {
       public get_all_tables() {
         super("get_all_tables");
@@ -40884,15 +41037,960 @@ public class ThriftHiveMetastore {
       }
     }
 
-    private static class drop_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory {
-      public drop_table_with_environment_context_resultStandardScheme getScheme() {
-        return new drop_table_with_environment_context_resultStandardScheme();
+    private static class drop_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory {
+      public drop_table_with_environment_context_resultStandardScheme getScheme() {
+        return new drop_table_with_environment_context_resultStandardScheme();
+      }
+    }
+
+    private static class drop_table_with_environment_context_resultStandardScheme extends StandardScheme<drop_table_with_environment_context_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // O1
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o1 = new NoSuchObjectException();
+                struct.o1.read(iprot);
+                struct.setO1IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // O3
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o3 = new MetaException();
+                struct.o3.read(iprot);
+                struct.setO3IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.o1 != null) {
+          oprot.writeFieldBegin(O1_FIELD_DESC);
+          struct.o1.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o3 != null) {
+          oprot.writeFieldBegin(O3_FIELD_DESC);
+          struct.o3.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class drop_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory {
+      public drop_table_with_environment_context_resultTupleScheme getScheme() {
+        return new drop_table_with_environment_context_resultTupleScheme();
+      }
+    }
+
+    private static class drop_table_with_environment_context_resultTupleScheme extends TupleScheme<drop_table_with_environment_context_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetO1()) {
+          optionals.set(0);
+        }
+        if (struct.isSetO3()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
+        if (struct.isSetO1()) {
+          struct.o1.write(oprot);
+        }
+        if (struct.isSetO3()) {
+          struct.o3.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(2);
+        if (incoming.get(0)) {
+          struct.o1 = new NoSuchObjectException();
+          struct.o1.read(iprot);
+          struct.setO1IsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.o3 = new MetaException();
+          struct.o3.read(iprot);
+          struct.setO3IsSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class get_tables_args implements org.apache.thrift.TBase<get_tables_args, get_tables_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_tables_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_args");
+
+    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1);
+    private static final org.apache.thrift.protocol.TField PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("pattern", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new get_tables_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_tables_argsTupleSchemeFactory());
+    }
+
+    private String db_name; // required
+    private String pattern; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      DB_NAME((short)1, "db_name"),
+      PATTERN((short)2, "pattern");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // DB_NAME
+            return DB_NAME;
+          case 2: // PATTERN
+            return PATTERN;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.PATTERN, new org.apache.thrift.meta_data.FieldMetaData("pattern", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_args.class, metaDataMap);
+    }
+
+    public get_tables_args() {
+    }
+
+    public get_tables_args(
+      String db_name,
+      String pattern)
+    {
+      this();
+      this.db_name = db_name;
+      this.pattern = pattern;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public get_tables_args(get_tables_args other) {
+      if (other.isSetDb_name()) {
+        this.db_name = other.db_name;
+      }
+      if (other.isSetPattern()) {
+        this.pattern = other.pattern;
+      }
+    }
+
+    public get_tables_args deepCopy() {
+      return new get_tables_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.db_name = null;
+      this.pattern = null;
+    }
+
+    public String getDb_name() {
+      return this.db_name;
+    }
+
+    public void setDb_name(String db_name) {
+      this.db_name = db_name;
+    }
+
+    public void unsetDb_name() {
+      this.db_name = null;
+    }
+
+    /** Returns true if field db_name is set (has been assigned a value) and false otherwise */
+    public boolean isSetDb_name() {
+      return this.db_name != null;
+    }
+
+    public void setDb_nameIsSet(boolean value) {
+      if (!value) {
+        this.db_name = null;
+      }
+    }
+
+    public String getPattern() {
+      return this.pattern;
+    }
+
+    public void setPattern(String pattern) {
+      this.pattern = pattern;
+    }
+
+    public void unsetPattern() {
+      this.pattern = null;
+    }
+
+    /** Returns true if field pattern is set (has been assigned a value) and false otherwise */
+    public boolean isSetPattern() {
+      return this.pattern != null;
+    }
+
+    public void setPatternIsSet(boolean value) {
+      if (!value) {
+        this.pattern = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case DB_NAME:
+        if (value == null) {
+          unsetDb_name();
+        } else {
+          setDb_name((String)value);
+        }
+        break;
+
+      case PATTERN:
+        if (value == null) {
+          unsetPattern();
+        } else {
+          setPattern((String)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case DB_NAME:
+        return getDb_name();
+
+      case PATTERN:
+        return getPattern();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case DB_NAME:
+        return isSetDb_name();
+      case PATTERN:
+        return isSetPattern();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof get_tables_args)
+        return this.equals((get_tables_args)that);
+      return false;
+    }
+
+    public boolean equals(get_tables_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_db_name = true && this.isSetDb_name();
+      boolean that_present_db_name = true && that.isSetDb_name();
+      if (this_present_db_name || that_present_db_name) {
+        if (!(this_present_db_name && that_present_db_name))
+          return false;
+        if (!this.db_name.equals(that.db_name))
+          return false;
+      }
+
+      boolean this_present_pattern = true && this.isSetPattern();
+      boolean that_present_pattern = true && that.isSetPattern();
+      if (this_present_pattern || that_present_pattern) {
+        if (!(this_present_pattern && that_present_pattern))
+          return false;
+        if (!this.pattern.equals(that.pattern))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_db_name = true && (isSetDb_name());
+      list.add(present_db_name);
+      if (present_db_name)
+        list.add(db_name);
+
+      boolean present_pattern = true && (isSetPattern());
+      list.add(present_pattern);
+      if (present_pattern)
+        list.add(pattern);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(get_tables_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetDb_name()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetPattern()).compareTo(other.isSetPattern());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetPattern()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pattern, other.pattern);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("get_tables_args(");
+      boolean first = true;
+
+      sb.append("db_name:");
+      if (this.db_name == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.db_name);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("pattern:");
+      if (this.pattern == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.pattern);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class get_tables_argsStandardSchemeFactory implements SchemeFactory {
+      public get_tables_argsStandardScheme getScheme() {
+        return new get_tables_argsStandardScheme();
+      }
+    }
+
+    private static class get_tables_argsStandardScheme extends StandardScheme<get_tables_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // DB_NAME
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.db_name = iprot.readString();
+                struct.setDb_nameIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // PATTERN
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.pattern = iprot.readString();
+                struct.setPatternIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.db_name != null) {
+          oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+          oprot.writeString(struct.db_name);
+          oprot.writeFieldEnd();
+        }
+        if (struct.pattern != null) {
+          oprot.writeFieldBegin(PATTERN_FIELD_DESC);
+          oprot.writeString(struct.pattern);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class get_tables_argsTupleSchemeFactory implements SchemeFactory {
+      public get_tables_argsTupleScheme getScheme() {
+        return new get_tables_argsTupleScheme();
+      }
+    }
+
+    private static class get_tables_argsTupleScheme extends TupleScheme<get_tables_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetDb_name()) {
+          optionals.set(0);
+        }
+        if (struct.isSetPattern()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
+        if (struct.isSetDb_name()) {
+          oprot.writeString(struct.db_name);
+        }
+        if (struct.isSetPattern()) {
+          oprot.writeString(struct.pattern);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(2);
+        if (incoming.get(0)) {
+          struct.db_name = iprot.readString();
+          struct.setDb_nameIsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.pattern = iprot.readString();
+          struct.setPatternIsSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class get_tables_result implements org.apache.thrift.TBase<get_tables_result, get_tables_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_tables_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_result");
+
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
+    private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new get_tables_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_tables_resultTupleSchemeFactory());
+    }
+
+    private List<String> success; // required
+    private MetaException o1; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      SUCCESS((short)0, "success"),
+      O1((short)1, "o1");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 0: // SUCCESS
+            return SUCCESS;
+          case 1: // O1
+            return O1;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_result.class, metaDataMap);
+    }
+
+    public get_tables_result() {
+    }
+
+    public get_tables_result(
+      List<String> success,
+      MetaException o1)
+    {
+      this();
+      this.success = success;
+      this.o1 = o1;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public get_tables_result(get_tables_result other) {
+      if (other.isSetSuccess()) {
+        List<String> __this__success = new ArrayList<String>(other.success);
+        this.success = __this__success;
+      }
+      if (other.isSetO1()) {
+        this.o1 = new MetaException(other.o1);
+      }
+    }
+
+    public get_tables_result deepCopy() {
+      return new get_tables_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.success = null;
+      this.o1 = null;
+    }
+
+    public int getSuccessSize() {
+      return (this.success == null) ? 0 : this.success.size();
+    }
+
+    public java.util.Iterator<String> getSuccessIterator() {
+      return (this.success == null) ? null : this.success.iterator();
+    }
+
+    public void addToSuccess(String elem) {
+      if (this.success == null) {
+        this.success = new ArrayList<String>();
+      }
+      this.success.add(elem);
+    }
+
+    public List<String> getSuccess() {
+      return this.success;
+    }
+
+    public void setSuccess(List<String> success) {
+      this.success = success;
+    }
+
+    public void unsetSuccess() {
+      this.success = null;
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean isSetSuccess() {
+      return this.success != null;
+    }
+
+    public void setSuccessIsSet(boolean value) {
+      if (!value) {
+        this.success = null;
+      }
+    }
+
+    public MetaException getO1() {
+      return this.o1;
+    }
+
+    public void setO1(MetaException o1) {
+      this.o1 = o1;
+    }
+
+    public void unsetO1() {
+      this.o1 = null;
+    }
+
+    /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO1() {
+      return this.o1 != null;
+    }
+
+    public void setO1IsSet(boolean value) {
+      if (!value) {
+        this.o1 = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unsetSuccess();
+        } else {
+          setSuccess((List<String>)value);
+        }
+        break;
+
+      case O1:
+        if (value == null) {
+          unsetO1();
+        } else {
+          setO1((MetaException)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case SUCCESS:
+        return getSuccess();
+
+      case O1:
+        return getO1();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case SUCCESS:
+        return isSetSuccess();
+      case O1:
+        return isSetO1();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof get_tables_result)
+        return this.equals((get_tables_result)that);
+      return false;
+    }
+
+    public boolean equals(get_tables_result that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_success = true && this.isSetSuccess();
+      boolean that_present_success = true && that.isSetSuccess();
+      if (this_present_success || that_present_success) {
+        if (!(this_present_success && that_present_success))
+          return false;
+        if (!this.success.equals(that.success))
+          return false;
+      }
+
+      boolean this_present_o1 = true && this.isSetO1();
+      boolean that_present_o1 = true && that.isSetO1();
+      if (this_present_o1 || that_present_o1) {
+        if (!(this_present_o1 && that_present_o1))
+          return false;
+        if (!this.o1.equals(that.o1))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_success = true && (isSetSuccess());
+      list.add(present_success);
+      if (present_success)
+        list.add(success);
+
+      boolean present_o1 = true && (isSetO1());
+      list.add(present_o1);
+      if (present_o1)
+        list.add(o1);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(get_tables_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetSuccess()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetO1()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("get_tables_result(");
+      boolean first = true;
+
+      sb.append("success:");
+      if (this.success == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.success);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o1:");
+      if (this.o1 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o1);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class get_tables_resultStandardSchemeFactory implements SchemeFactory {
+      public get_tables_resultStandardScheme getScheme() {
+        return new get_tables_resultStandardScheme();
       }
     }
 
-    private static class drop_table_with_environment_context_resultStandardScheme extends StandardScheme<drop_table_with_environment_context_result> {
+    private static class get_tables_resultStandardScheme extends StandardScheme<get_tables_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -40902,20 +42000,29 @@ public class ThriftHiveMetastore {
             break;
           }
           switch (schemeField.id) {
-            case 1: // O1
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.o1 = new NoSuchObjectException();
-                struct.o1.read(iprot);
-                struct.setO1IsSet(true);
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+                {
+                  org.apache.thrift.protocol.TList _list650 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list650.size);
+                  String _elem651;
+                  for (int _i652 = 0; _i652 < _list650.size; ++_i652)
+                  {
+                    _elem651 = iprot.readString();
+                    struct.success.add(_elem651);
+                  }
+                  iprot.readListEnd();
+                }
+                struct.setSuccessIsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
               break;
-            case 2: // O3
+            case 1: // O1
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.o3 = new MetaException();
-                struct.o3.read(iprot);
-                struct.setO3IsSet(true);
+                struct.o1 = new MetaException();
+                struct.o1.read(iprot);
+                struct.setO1IsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
@@ -40929,91 +42036,115 @@ public class ThriftHiveMetastore {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.success != null) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
+            for (String _iter653 : struct.success)
+            {
+              oprot.writeString(_iter653);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
         if (struct.o1 != null) {
           oprot.writeFieldBegin(O1_FIELD_DESC);
           struct.o1.write(oprot);
           oprot.writeFieldEnd();
         }
-        if (struct.o3 != null) {
-          oprot.writeFieldBegin(O3_FIELD_DESC);
-          struct.o3.write(oprot);
-          oprot.writeFieldEnd();
-        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
 
     }
 
-    private static class drop_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory {
-      public drop_table_with_environment_context_resultTupleScheme getScheme() {
-        return new drop_table_with_environment_context_resultTupleScheme();
+    private static class get_tables_resultTupleSchemeFactory implements SchemeFactory {
+      public get_tables_resultTupleScheme getScheme() {
+        return new get_tables_resultTupleScheme();
       }
     }
 
-    private static class drop_table_with_environment_context_resultTupleScheme extends TupleScheme<drop_table_with_environment_context_result> {
+    private static class get_tables_resultTupleScheme extends TupleScheme<get_tables_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
-        if (struct.isSetO1()) {
+        if (struct.isSetSuccess()) {
           optionals.set(0);
         }
-        if (struct.isSetO3()) {
+        if (struct.isSetO1()) {
           optionals.set(1);
         }
         oprot.writeBitSet(optionals, 2);
+        if (struct.isSetSuccess()) {
+          {
+            oprot.writeI32(struct.success.size());
+            for (String _iter654 : struct.success)
+            {
+              oprot.writeString(_iter654);
+            }
+          }
+        }
         if (struct.isSetO1()) {
           struct.o1.write(oprot);
         }
-        if (struct.isSetO3()) {
-          struct.o3.write(oprot);
-        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
-          struct.o1 = new NoSuchObjectException();
-          struct.o1.read(iprot);
-          struct.setO1IsSet(true);
+          {
+            org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list655.size);
+            String _elem656;
+            for (int _i657 = 0; _i657 < _list655.size; ++_i657)
+            {
+              _elem656 = iprot.readString();
+              struct.success.add(_elem656);
+            }
+          }
+          struct.setSuccessIsSet(true);
         }
         if (incoming.get(1)) {
-          struct.o3 = new MetaException();
-          struct.o3.read(iprot);
-          struct.setO3IsSet(true);
+          struct.o1 = new MetaException();
+          struct.o1.read(iprot);
+          struct.setO1IsSet(true);
         }
       }
     }
 
   }
 
-  public static class get_tables_args implements org.apache.thrift.TBase<get_tables_args, get_tables_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_tables_args>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_args");
+  public static class get_table_meta_args implements org.apache.thrift.TBase<get_table_meta_args, get_table_meta_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_table_meta_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_meta_args");
 
-    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1);
-    private static final org.apache.thrift.protocol.TField PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("pattern", org.apache.thrift.protocol.TType.STRING, (short)2);
+    private static final org.apache.thrift.protocol.TField DB_PATTERNS_FIELD_DESC = new org.apache.thrift.protocol.TField("db_patterns", org.apache.thrift.protocol.TType.STRING, (short)1);
+    private static final org.apache.thrift.protocol.TField TBL_PATTERNS_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_patterns", org.apache.thrift.protocol.TType.STRING, (short)2);
+    private static final org.apache.thrift.protocol.TField TBL_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_types", org.apache.thrift.protocol.TType.LIST, (short)3);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_tables_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_tables_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_table_meta_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_table_meta_argsTupleSchemeFactory());
     }
 
-    private String db_name; // required
-    private String pattern; // required
+    private String db_patterns; // required
+    private String tbl_patterns; // required
+    private List<String> tbl_types; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      DB_NAME((short)1, "db_name"),
-      PATTERN((short)2, "pattern");
+      DB_PATTERNS((short)1, "db_patterns"),
+      TBL_PATTERNS((short)2, "tbl_patterns"),
+      TBL_TYPES((short)3, "tbl_types");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -41028,10 +42159,12 @@ public class ThriftHiveMetastore {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
-          case 1: // DB_NAME
-            return DB_NAME;
-          case 2: // PATTERN
-            return PATTERN;
+          case 1: // DB_PATTERNS
+            return DB_PATTERNS;
+          case 2: // TBL_PATTERNS
+            return TBL_PATTERNS;
+          case 3: // TBL_TYPES
+            return TBL_TYPES;
           default:
             return null;
         }
@@ -41075,109 +42208,165 @@ public class ThriftHiveMetastore {
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-      tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+      tmpMap.put(_Fields.DB_PATTERNS, new org.apache.thrift.meta_data.FieldMetaData("db_patterns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-      tmpMap.put(_Fields.PATTERN, new org.apache.thrift.meta_data.FieldMetaData("pattern", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+      tmpMap.put(_Fields.TBL_PATTERNS, new org.apache.thrift.meta_data.FieldMetaData("tbl_patterns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.TBL_TYPES, new org.apache.thrift.meta_data.FieldMetaData("tbl_types", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_meta_args.class, metaDataMap);
     }
 
-    public get_tables_args() {
+    public get_table_meta_args() {
     }
 
-    public get_tables_args(
-      String db_name,
-      String pattern)
+    public get_table_meta_args(
+      String db_patterns,
+      String tbl_patterns,
+      List<String> tbl_types)
     {
       this();
-      this.db_name = db_name;
-      this.pattern = pattern;
+      this.db_patterns = db_patterns;
+      this.tbl_patterns = tbl_patterns;
+      this.tbl_types = tbl_types;
     }
 
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_tables_args(get_tables_args other) {
-      if (other.isSetDb_name()) {
-        this.db_name = other.db_name;
+    public get_table_meta_args(get_table_meta_args other) {
+      if (other.isSetDb_patterns()) {
+        this.db_patterns = other.db_patterns;
       }
-      if (other.isSetPattern()) {
-        this.pattern = other.pattern;
+      if (other.isSetTbl_patterns()) {
+        this.tbl_patterns = other.tbl_patterns;
+      }
+      if (other.isSetTbl_types()) {
+        List<String> __this__tbl_types = new ArrayList<String>(other.tbl_types);
+        this.tbl_types = __this__tbl_types;
       }
     }
 
-    public get_tables_args deepCopy() {
-      return new get_tables_args(this);
+    public get_table_meta_args deepCopy() {
+      return new get_table_meta_args(this);
     }
 
     @Override
     public void clear() {
-      this.db_name = null;
-      this.pattern = null;
+      this.db_patterns = null;
+      this.tbl_patterns = null;
+      this.tbl_types = null;
     }
 
-    public String getDb_name() {
-      return this.db_name;
+    public String getDb_patterns() {
+      return this.db_patterns;
     }
 
-    public void setDb_name(String db_name) {
-      this.db_name = db_name;
+    public void setDb_patterns(String db_patterns) {
+      this.db_patterns = db_patterns;
     }
 
-    public void unsetDb_name() {
-      this.db_name = null;
+    public void unsetDb_patterns() {
+      this.db_patterns = null;
     }
 
-    /** Returns true if field db_name is set (has been assigned a value) and false otherwise */
-    public boolean isSetDb_name() {
-      return this.db_name != null;
+    /** Returns true if field db_patterns is set (has been assigned a value) and false otherwise */
+    public boolean isSetDb_patterns() {
+      return this.db_patterns != null;
     }
 
-    public void setDb_nameIsSet(boolean value) {
+    public void setDb_patternsIsSet(boolean value) {
       if (!value) {
-        this.db_name = null;
+        this.db_patterns = null;
       }
     }
 
-    public String getPattern() {
-      return this.pattern;
+    public String getTbl_patterns() {
+      return this.tbl_patterns;
     }
 
-    public void setPattern(String pattern) {
-      this.pattern = pattern;
+    public void setTbl_patterns(String tbl_patterns) {
+      this.tbl_patterns = tbl_patterns;
     }
 
-    public void unsetPattern() {
-      this.pattern = null;
+    public void unsetTbl_patterns() {
+      this.tbl_patterns = null;
     }
 
-    /** Returns true if field pattern is set (has been assigned a value) and false otherwise */
-    public boolean isSetPattern() {
-      return this.pattern != null;
+    /** Returns true if field tbl_patterns is set (has been assigned a value) and false otherwise */
+    public boolean isSetTbl_patterns() {
+      return this.tbl_patterns != null;
     }
 
-    public void setPatternIsSet(boolean value) {
+    public void setTbl_patternsIsSet(boolean value) {
       if (!value) {
-        this.pattern = null;
+        this.tbl_patterns = null;
+      }
+    }
+
+    public int getTbl_typesSize() {
+      return (this.tbl_types == null) ? 0 : this.tbl_types.size();
+    }
+
+    public java.util.Iterator<String> getTbl_typesIterator() {
+      return (this.tbl_types == null) ? null : this.tbl_types.iterator();
+    }
+
+    public void addToTbl_types(String elem) {
+      if (this.tbl_types == null) {
+        this.tbl_types = new ArrayList<String>();
+      }
+      this.tbl_types.add(elem);
+    }
+
+    public List<String> getTbl_types() {
+      return this.tbl_types;
+    }
+
+    public void setTbl_types(List<String> tbl_types) {
+      this.tbl_types = tbl_types;
+    }
+
+    public void unsetTbl_types() {
+      this.tbl_types = null;
+    }
+
+    /** Returns true if field tbl_types is set (has been assigned a value) and false otherwise */
+    public boolean isSetTbl_types() {
+      return this.tbl_types != null;
+    }
+
+    public void setTbl_typesIsSet(boolean value) {
+      if (!value) {
+        this.tbl_types = null;
       }
     }
 
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
-      case DB_NAME:
+      case DB_PATTERNS:
         if (value == null) {
-          unsetDb_name();
+          unsetDb_patterns();
         } else {
-          setDb_name((String)value);
+          setDb_patterns((String)value);
         }
         break;
 
-      case PATTERN:
+      case TBL_PATTERNS:
         if (value == null) {
-          unsetPattern();
+          unsetTbl_patterns();
         } else {
-          setPattern((String)value);
+          setTbl_patterns((String)value);
+        }
+        break;
+
+      case TBL_TYPES:
+        if (value == null) {
+          unsetTbl_types();
+        } else {
+          setTbl_types((List<String>)value);
         }
         break;
 
@@ -41186,11 +42375,14 @@ public class ThriftHiveMetastore {
 
     public Object getFieldValue(_Fields field) {
       switch (field) {
-      case DB_NAME:
-        return getDb_name();
+      case DB_PATTERNS:
+        return getDb_patterns();
 
-      case PATTERN:
-        return getPattern();
+      case TBL_PATTERNS:
+        return getTbl_patterns();
+
+      case TBL_TYPES:
+        return getTbl_types();
 
       }
       throw new IllegalStateException();
@@ -41203,10 +42395,12 @@ public class ThriftHiveMetastore {
       }
 
       switch (field) {
-      case DB_NAME:
-        return isSetDb_name();
-      case PATTERN:
-        return isSetPattern();
+      case DB_PATTERNS:
+        return isSetDb_patterns();
+      case TBL_PATTERNS:
+        return isSetTbl_patterns();
+      case TBL_TYPES:
+        return isSetTbl_types();
       }
       throw new IllegalStateException();
     }
@@ -41215,30 +42409,39 @@ public class ThriftHiveMetastore {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_tables_args)
-        return this.equals((get_tables_args)that);
+      if (that instanceof get_table_meta_args)
+        return this.equals((get_table_meta_args)that);
       return false;
     }
 
-    public boolean equals(get_tables_args that) {
+    public boolean equals(get_table_meta_args that) {
       if (that == null)
         return false;
 
-      boolean this_present_db_name = true && this.isSetDb_name();
-      boolean that_present_db_name = true && that.isSetDb_name();
-      if (this_present_db_name || that_present_db_name) {
-        if (!(this_present_db_name && that_present_db_name))
+      boolean this_present_db_patterns = true && this.isSetDb_patterns();
+      boolean that_present_db_patterns = true && that.isSetDb_patterns();
+      if (this_present_db_patterns || that_present_db_patterns) {
+        if (!(this_present_db_patterns && that_present_db_patterns))
           return false;
-        if (!this.db_name.equals(that.db_name))
+        if (!this.db_patterns.equals(that.db_patterns))
           return false;
       }
 
-      boolean this_present_pattern = true && this.isSetPattern();
-      boolean that_present_pattern = true && that.isSetPattern();
-      if (this_present_pattern || that_present_pattern) {
-        if (!(this_present_pattern && that_present_pattern))
+      boolean this_present_tbl_patterns = true && this.isSetTbl_patterns();
+      boolean that_present_tbl_patterns = true && that.isSetTbl_patterns();
+      if (this_present_tbl_patterns || that_present_tbl_patterns) {
+        if (!(this_present_tbl_patterns && that_present_tbl_patterns))
           return false;
-        if (!this.pattern.equals(that.pattern))
+        if (!this.tbl_patterns.equals(that.tbl_patterns))
+          return false;
+      }
+
+      boolean this_present_tbl_types = true && this.isSetTbl_types();
+      boolean that_present_tbl_types = true && that.isSetTbl_types();
+      if (this_present_tbl_types || that_present_tbl_types) {
+        if (!(this_present_tbl_types && that_present_tbl_types))
+          return false;
+        if (!this.tbl_types.equals(that.tbl_types))
           return false;
       }
 
@@ -41249,43 +42452,58 @@ public class ThriftHiveMetastore {
     public int hashCode() {
       List<Object> list = new ArrayList<Object>();
 
-      boolean present_db_name = true && (isSetDb_name());
-      list.add(present_db_name);
-      if (present_db_name)
-        list.add(db_name);
+      boolean present_db_patterns = true && (isSetDb_patterns());
+      list.add(present_db_patterns);
+      if (present_db_patterns)
+        list.add(db_patterns);
 
-      boolean present_pattern = true && (isSetPattern());
-      list.add(present_pattern);
-      if (present_pattern)
-        list.add(pattern);
+      boolean present_tbl_patterns = true && (isSetTbl_patterns());
+      list.add(present_tbl_patterns);
+      if (present_tbl_patterns)
+        list.add(tbl_patterns);
+
+      boolean present_tbl_types = true && (isSetTbl_types());
+      list.add(present_tbl_types);
+      if (present_tbl_types)
+        list.add(tbl_types);
 
       return list.hashCode();
     }
 
     @Override
-    public int compareTo(get_tables_args other) {
+    public int compareTo(get_table_meta_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
 
       int lastComparison = 0;
 
-      lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name());
+      lastComparison = Boolean.valueOf(isSetDb_patterns()).compareTo(other.isSetDb_patterns());
       if (lastComparison != 0) {
         return lastComparison;
       }
-      if (isSetDb_name()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name);
+      if (isSetDb_patterns()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_patterns, other.db_patterns);
         if (lastComparison != 0) {
           return lastComparison;
         }
       }
-      lastComparison = Boolean.valueOf(isSetPattern()).compareTo(other.isSetPattern());
+      lastComparison = Boolean.valueOf(isSetTbl_patterns()).compareTo(other.isSetTbl_patterns());
       if (lastComparison != 0) {
         return lastComparison;
       }
-      if (isSetPattern()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pattern, other.pattern);
+      if (isSetTbl_patterns()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_patterns, other.tbl_patterns);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetTbl_types()).compareTo(other.isSetTbl_types());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetTbl_types()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_types, other.tbl_types);
         if (lastComparison != 0) {
           return lastComparison;
         }
@@ -41307,22 +42525,30 @@ public class ThriftHiveMetastore {
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_tables_args(");
+      StringBuilder sb = new StringBuilder("get_table_meta_args(");
       boolean first = true;
 
-      sb.append("db_name:");
-      if (this.db_name == null) {
+      sb.append("db_patterns:");
+      if (this.db_patterns == null) {
         sb.append("null");
       } else {
-        sb.append(this.db_name);
+        sb.append(this.db_patterns);
       }
       first = false;
       if (!first) sb.append(", ");
-      sb.append("pattern:");
-      if (this.pattern == null) {
+      sb.append("tbl_patterns:");
+      if (this.tbl_patterns == null) {
         sb.append("null");
       } else {
-        sb.append(this.pattern);
+        sb.append(this.tbl_patterns);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("tbl_types:");
+      if (this.tbl_types == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.tbl_types);
       }
       first = false;
       sb.append(")");
@@ -41350,15 +42576,15 @@ public class ThriftHiveMetastore {
       }
     }
 
-    private static class get_tables_argsStandardSchemeFactory implements SchemeFactory {
-      public get_tables_argsStandardScheme getScheme() {
-        return new get_tables_argsStandardScheme();
+    private static class get_table_meta_argsStandardSchemeFactory implements SchemeFactory {
+      public get_table_meta_argsStandardScheme getScheme() {
+        return new get_table_meta_argsStandardScheme();
       }
     }
 
-    private static class get_tables_argsStandardScheme extends StandardScheme<get_tables_args> {
+    private static class get_table_meta_argsStandardScheme extends StandardScheme<get_table_meta_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -41368,18 +42594,36 @@ public class ThriftHiveMetastore {
             break;
           }
           switch (schemeField.id) {
-            case 1: // DB_NAME
+            case 1: // DB_PATTERNS
               if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.db_name = iprot.readString();
-                struct.setDb_nameIsSet(true);
+                struct.db_patterns = iprot.readString();
+                struct.setDb_patternsIsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
               break;
-            case 2: // PATTERN
+            case 2: // TBL_PATTERNS
               if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.pattern = iprot.readString();
-                struct.setPatternIsSet(true);
+                struct.tbl_patterns = iprot.readString();
+                struct.setTbl_patternsIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 3: // TBL_TYPES
+              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+                {
+                  org.apache.thrift.protocol.TList _list658 = iprot.readListBegin();
+                  struct.tbl_types = new ArrayList<String>(_list658.size);
+                  String _elem659;
+                  for (int _i660 = 0; _i660 < _list658.size; ++_i660)
+                  {
+                    _elem659 = iprot.readString();
+                    struct.tbl_types.add(_elem659);
+                  }
+                  iprot.readListEnd();
+                }
+                struct.setTbl_typesIsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
@@ -41393,18 +42637,30 @@ public class ThriftHiveMetastore {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_args struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.db_name != null) {
-          oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
-          oprot.writeString(struct.db_name);
+        if (struct.db_patterns != null) {
+          oprot.writeFieldBegin(DB_PATTERNS_FIELD_DESC);
+          oprot.writeString(struct.db_patterns);
           oprot.writeFieldEnd();
         }
-        if (struct.pattern != null) {
-          oprot.writeFieldBegin(PATTERN_FIELD_DESC);
-          oprot.writeString(struct.pattern);
+        if (struct.tbl_patterns != null) {
+          oprot.writeFieldBegin(TBL_PATTERNS_FIELD_DESC);
+          oprot.writeString(struct.tbl_patterns);
+          oprot.writeFieldEnd();
+        }
+        if (struct.tbl_types != null) {
+          oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
+            for (String _iter661 : struct.tbl_types)
+            {
+              oprot.writeString(_iter661);
+            }
+            oprot.writeListEnd();
+          }
           oprot.writeFieldEnd();
         }
         oprot.writeFieldStop();
@@ -41413,63 +42669,88 @@ public class ThriftHiveMetastore {
 
     }
 
-    private static class get_tables_argsTupleSchemeFactory implements SchemeFactory {
-      public get_tables_argsTupleScheme getScheme() {
-        return new get_tables_argsTupleScheme();
+    private static class get_table_meta_argsTupleSchemeFactory implements SchemeFactory {
+      public get_table_meta_argsTupleScheme getScheme() {
+        return new get_table_meta_argsTupleScheme();
       }
     }
 
-    private static class get_tables_argsTupleScheme extends TupleScheme<get_tables_args> {
+    private static class get_table_meta_argsTupleScheme extends TupleScheme<get_table_meta_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
-        if (struct.isSetDb_name()) {
+        if (struct.isSetDb_patterns()) {
           optionals.set(0);
         }
-        if (struct.isSetPattern()) {
+        if (struct.isSetTbl_patterns()) {
           optionals.set(1);
         }
-        oprot.writeBitSet(optionals, 2);
-        if (struct.isSetDb_name()) {
-          oprot.writeString(struct.db_name);
+        if (struct.isSetTbl_types()) {
+          optionals.set(2);
         }
-        if (struct.isSetPattern()) {
-          oprot.writeString(struct.pattern);
+        oprot.writeBitSet(optionals, 3);
+        if (struct.isSetDb_patterns()) {
+          oprot.writeString(struct.db_patterns);
+        }
+        if (struct.isSetTbl_patterns()) {
+          oprot.writeString(struct.tbl_patterns);
+        }
+        if (struct.isSetTbl_types()) {
+          {
+            oprot.writeI32(struct.tbl_types.size());
+            for (String _iter662 : struct.tbl_types)
+            {
+              oprot.writeString(_iter662);
+            }
+          }
         }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(2);
+        BitSet incoming = iprot.readBitSet(3);
         if (incoming.get(0)) {
-          struct.db_name = iprot.readString();
-          struct.setDb_nameIsSet(true);
+          struct.db_patterns = iprot.readString();
+          struct.setDb_patternsIsSet(true);
         }
         if (incoming.get(1)) {
-          struct.pattern = iprot.readString();
-          struct.setPatternIsSet(true);
+          struct.tbl_patterns = iprot.readString();
+          struct.setTbl_patternsIsSet(true);
+        }
+        if (incoming.get(2)) {
+          {
+            org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_types = new ArrayList<String>(_list663.size);
+            String _elem664;
+            for (int _i665 = 0; _i665 < _list663.size; ++_i665)
+            {
+              _elem664 = iprot.readString();
+              struct.tbl_types.add(_elem664);
+            }
+          }
+          struct.setTbl_typesIsSet(true);
         }
       }
     }
 
   }
 
-  public static class get_tables_result implements org.apache.thrift.TBase<get_tables_result, get_tables_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_tables_result>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_result");
+  public static class get_table_meta_result implements org.apache.thrift.TBase<get_table_meta_result, get_table_meta_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_table_meta_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_meta_result");
 
     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
     private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_tables_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_tables_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_table_meta_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_table_meta_resultTupleSchemeFactory());
     }
 
-    private List<String> success; // required
+    private List<TableMeta> success; // required
     private MetaException o1; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -41539,18 +42820,18 @@ public class ThriftHiveMetastore {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableMeta.class))));
       tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_meta_result.class, metaDataMap);
     }
 
-    public get_tables_result() {
+    public get_table_meta_result() {
     }
 
-    public get_tables_result(
-      List<String> success,
+    public get_table_meta_result(
+      List<TableMeta> success,
       MetaException o1)
     {
       this();
@@ -41561,9 +42842,12 @@ public class ThriftHiveMetastore {
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_tables_result(get_tables_result other) {
+    public get_table_meta_result(get_table_meta_result other) {
       if (other.isSetSuccess()) {
-        List<String> __this__success = new ArrayList<String>(other.success);
+        List<TableMeta> __this__success = new ArrayList<TableMeta>(other.success.size());
+        for (TableMeta other_element : other.success) {
+          __this__success.add(new TableMeta(other_element));
+        }
         this.success = __this__success;
       }
       if (other.isSetO1()) {
@@ -41571,8 +42855,8 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public get_tables_result deepCopy() {
-      return new get_tables_result(this);
+    public get_table_meta_result deepCopy() {
+      return new get_table_meta_result(this);
     }
 
     @Override
@@ -41585,22 +42869,22 @@ public class ThriftHiveMetastore {
       return (this.success == null) ? 0 : this.success.size();
     }
 
-    public java.util.Iterator<String> getSuccessIterator() {
+    public java.util.Iterator<TableMeta> getSuccessIterator() {
       return (this.success == null) ? null : this.success.iterator();
     }
 
-    public void addToSuccess(String elem) {
+    public void addToSuccess(TableMeta elem) {
       if (this.success == null) {
-        this.success = new ArrayList<String>();
+        this.success = new ArrayList<TableMeta>();
       }
       this.success.add(elem);
     }
 
-    public List<String> getSuccess() {
+    public List<TableMeta> getSuccess() {
       return this.success;
     }
 
-    public void setSuccess(List<String> success) {
+    public void setSuccess(List<TableMeta> success) {
       this.success = success;
     }
 
@@ -41648,7 +42932,7 @@ public class ThriftHiveMetastore {
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((List<String>)value);
+          setSuccess((List<TableMeta>)value);
         }
         break;
 
@@ -41694,12 +42978,12 @@ public class ThriftHiveMetastore {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_tables_result)
-        return this.equals((get_tables_result)that);
+      if (that instanceof get_table_meta_result)
+        return this.equals((get_table_meta_result)that);
       return false;
     }
 
-    public boolean equals(get_tables_result that) {
+    public boolean equals(get_table_meta_result that) {
       if (that == null)
         return false;
 
@@ -41742,7 +43026,7 @@ public class ThriftHiveMetastore {
     }
 
     @Override
-    public int compareTo(get_tables_result other) {
+    public int compareTo(get_table_meta_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -41786,7 +43070,7 @@ public class ThriftHiveMetastore {
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_tables_result(");
+      StringBuilder sb = new StringBuilder("get_table_meta_result(");
       boolean first = true;
 
       sb.append("success:");
@@ -41829,15 +43113,15 @@ public class ThriftHiveMetastore {
       }
     }
 
-    private static class get_tables_resultStandardSchemeFactory implements SchemeFactory {
-      public get_tables_resultStandardScheme getScheme() {
-        return new get_tables_resultStandardScheme();
+    private static class get_table_meta_resultStandardSchemeFactory implements SchemeFactory {
+      public get_table_meta_resultStandardScheme getScheme() {
+        return new get_table_meta_resultStandardScheme();
       }
     }
 
-    private static class get_tables_resultStandardScheme extends StandardScheme<get_tables_result> {
+    private static class get_table_meta_resultStandardScheme extends StandardScheme<get_table_meta_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -41850,13 +43134,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list650 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list650.size);
-                  String _elem651;
-                  for (int _i652 = 0; _i652 < _list650.size; ++_i652)
+                  org.apache.thrift.protocol.TList _list666 = iprot.readListBegin();
+                  struct.success = new ArrayList<TableMeta>(_list666.size);
+                  TableMeta _elem667;
+                  for (int _i668 = 0; _i668 < _list666.size; ++_i668)
                   {
-                    _elem651 = iprot.readString();
-                    struct.success.add(_elem651);
+                    _elem667 = new TableMeta();
+                    _elem667.read(iprot);
+                    struct.success.add(_elem667);
                   }
                   iprot.readListEnd();
                 }
@@ -41883,17 +43168,17 @@ public class ThriftHiveMetastore {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_result struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
         if (struct.success != null) {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
-            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter653 : struct.success)
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
+            for (TableMeta _iter669 : struct.success)
             {
-              oprot.writeString(_iter653);
+              _iter669.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -41910,16 +43195,16 @@ public class ThriftHiveMetastore {
 
     }
 
-    private static class get_tables_resultTupleSchemeFactory implements SchemeFactory {
-      public get_tables_resultTupleScheme getScheme() {
-        return new get_tables_resultTupleScheme();
+    private static class get_table_meta_resultTupleSchemeFactory implements SchemeFactory {
+      public get_table_meta_resultTupleScheme getScheme() {
+        return new get_table_meta_resultTupleScheme();
       }
     }
 
-    private static class get_tables_resultTupleScheme extends TupleScheme<get_tables_result> {
+    private static class get_table_meta_resultTupleScheme extends TupleScheme<get_table_meta_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetSuccess()) {
@@ -41932,9 +43217,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter654 : struct.success)
+            for (TableMeta _iter670 : struct.success)
             {
-              oprot.writeString(_iter654);
+              _iter670.write(oprot);
             }
           }
         }
@@ -41944,18 +43229,19 @@ public class ThriftHiveMetastore {
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list655.size);
-            String _elem656;
-            for (int _i657 = 0; _i657 < _list655.size; ++_i657)
+            org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<TableMeta>(_list671.size);
+            TableMeta _elem672;
+            for (int _i673 = 0; _i673 < _list671.size; ++_i673)
             {
-              _elem656 = iprot.readString();
-              struct.success.add(_elem656);
+              _elem672 = new TableMeta();
+              _elem672.read(iprot);
+              struct.success.add(_elem672);
             }
           }
           struct.setSuccessIsSet(true);
@@ -42721,13 +44007,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list658 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list658.size);
-                  String _elem659;
-                  for (int _i660 = 0; _i660 < _list658.size; ++_i660)
+                  org.apache.thrift.protocol.TList _list674 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list674.size);
+                  String _elem675;
+                  for (int _i676 = 0; _i676 < _list674.size; ++_i676)
                   {
-                    _elem659 = iprot.readString();
-                    struct.success.add(_elem659);
+                    _elem675 = iprot.readString();
+                    struct.success.add(_elem675);
                   }
                   i

<TRUNCATED>

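For readers skimming the regenerated Thrift plumbing above: the TupleScheme pair encodes which optional fields are present as a bitset written ahead of the values, which is why the patch grows writeBitSet/readBitSet from 2 to 3 bits once tbl_types joins db_patterns and tbl_patterns. A standalone sketch of that encoding idea, using plain java.util.BitSet and System.out in place of a TProtocol (the field names only mirror the generated struct, this is not the generated API itself):

import java.util.BitSet;
import java.util.List;

public class TupleEncodingSketch {
  // Mirrors get_table_meta_args: two optional strings and an optional list.
  static String dbPatterns = "default|testdb";
  static String tblPatterns = "test*";
  static List<String> tblTypes = null; // unset

  public static void main(String[] args) {
    // Writer side: record which fields are set, in declaration order.
    BitSet optionals = new BitSet();
    if (dbPatterns != null) optionals.set(0);
    if (tblPatterns != null) optionals.set(1);
    if (tblTypes != null) optionals.set(2);

    // The bitset goes on the wire first, then only the fields whose bit is set.
    System.out.println("bitset = " + optionals); // {0, 1}
    if (optionals.get(0)) System.out.println("write dbPatterns: " + dbPatterns);
    if (optionals.get(1)) System.out.println("write tblPatterns: " + tblPatterns);
    if (optionals.get(2)) System.out.println("write tblTypes: " + tblTypes);

    // The reader side does the mirror image: readBitSet(3), then read each
    // field whose bit is set, exactly as get_table_meta_argsTupleScheme.read does.
  }
}
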
[38/55] [abbrv] hive git commit: HIVE-11201 : HCatalog is ignoring user specified avro schema in the table definition (Bing Li via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11201 : HCatalog is ignoring user specified avro schema in the table definition (Bing Li via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d5a69ec8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d5a69ec8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d5a69ec8

Branch: refs/heads/spark
Commit: d5a69ec8c078cdfd8cb01eae21009cd6366d6644
Parents: 41b60c4
Author: Bing Li <sa...@gmail.com>
Authored: Tue Jul 7 23:56:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Nov 9 17:51:06 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hive/hcatalog/mapreduce/SpecialCases.java  | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d5a69ec8/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java
index 1bf6f07..756abf8 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java
@@ -112,14 +112,14 @@ public class SpecialCases {
         colTypes.add(TypeInfoUtils.getTypeInfoFromTypeString(field.getTypeString()));
       }
 
-      jobProperties.put(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(),
+      if (jobProperties.get(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName())==null
+          || jobProperties.get(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()).isEmpty()) {
+     
+        jobProperties.put(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(),
           AvroSerDe.getSchemaFromCols(properties, colNames, colTypes, null).toString());
-
-
-      for (String propName : jobProperties.keySet()){
-        String propVal = jobProperties.get(propName);
       }
 
+
     }
   }
 

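Stripped of diff context, the hunk above changes SpecialCases so that a schema derived from the table's column list is only installed when the table definition did not already carry one. A minimal sketch of that guard, assuming only that the Avro SerDe reads its schema from the avro.schema.literal property (the helper and class names here are illustrative, not the HCatalog API):

import java.util.HashMap;
import java.util.Map;

public class SchemaLiteralGuardSketch {
  static final String SCHEMA_LITERAL = "avro.schema.literal";

  // Only fill in a derived schema when the user did not supply one.
  static void putDerivedSchemaIfAbsent(Map<String, String> jobProperties, String derivedSchema) {
    String existing = jobProperties.get(SCHEMA_LITERAL);
    if (existing == null || existing.isEmpty()) {
      jobProperties.put(SCHEMA_LITERAL, derivedSchema);
    }
  }

  public static void main(String[] args) {
    Map<String, String> props = new HashMap<String, String>();
    props.put(SCHEMA_LITERAL, "{\"type\":\"record\",\"name\":\"user_supplied\",\"fields\":[]}");
    putDerivedSchemaIfAbsent(props, "{\"type\":\"record\",\"name\":\"derived\",\"fields\":[]}");
    // The user-specified schema wins, which is the behavior HIVE-11201 restores.
    System.out.println(props.get(SCHEMA_LITERAL)); // prints the user_supplied schema
  }
}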

[46/55] [abbrv] hive git commit: HIVE-7575 GetTables thrift call is very slow (Navis via Aihua Xu, reviewed by Szehon Ho, Aihua Xu)

Posted by xu...@apache.org.
 HIVE-7575 GetTables thrift call is very slow (Navis via Aihua Xu, reviewed by Szehon Ho, Aihua Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b678ed85
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b678ed85
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b678ed85

Branch: refs/heads/spark
Commit: b678ed85da7b25c0883426b4ee53a014857d0e1b
Parents: 16521c4
Author: aihuaxu <ai...@apache.org>
Authored: Tue Nov 10 15:37:08 2015 -0500
Committer: aihuaxu <ai...@apache.org>
Committed: Tue Nov 10 15:37:08 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   |  208 +-
 .../apache/hive/jdbc/HiveDatabaseMetaData.java  |    4 +-
 metastore/if/hive_metastore.thrift              |    9 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2323 +++++++-----
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  148 +
 .../ThriftHiveMetastore_server.skeleton.cpp     |    5 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  349 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   65 +
 .../hadoop/hive/metastore/api/TableMeta.java    |  701 ++++
 .../hive/metastore/api/ThriftHiveMetastore.java | 3406 ++++++++++++------
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1275 ++++---
 .../src/gen/thrift/gen-php/metastore/Types.php  |  144 +
 .../hive_metastore/ThriftHiveMetastore-remote   |    7 +
 .../hive_metastore/ThriftHiveMetastore.py       |  883 +++--
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  110 +
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   25 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   65 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   21 +-
 .../hive/metastore/HiveMetaStoreClient.java     |   32 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |    7 +
 .../hadoop/hive/metastore/ObjectStore.java      |  112 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |    4 +
 .../hadoop/hive/metastore/hbase/HBaseStore.java |   44 +-
 .../DummyRawStoreControlledCommit.java          |    7 +
 .../DummyRawStoreForJdoConnection.java          |    7 +
 .../ql/metadata/SessionHiveMetaStoreClient.java |   64 +-
 .../cli/operation/GetTablesOperation.java       |   47 +-
 .../cli/operation/MetadataOperation.java        |   23 +-
 28 files changed, 7128 insertions(+), 2967 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 2b3fdf1..5450eaa 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -42,6 +42,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.InputStream;
+import java.lang.Exception;
+import java.lang.Object;
+import java.lang.String;
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
 import java.sql.DriverManager;
@@ -56,8 +59,11 @@ import java.sql.Timestamp;
 import java.sql.Types;
 import java.text.ParseException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Date;
+import java.util.HashSet;
 import java.util.HashMap;
+import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -134,6 +140,7 @@ public class TestJdbcDriver2 {
         stmt1.execute("DROP DATABASE " + db + " CASCADE");
       }
     }
+    stmt1.execute("create database testdb");
     stmt1.close();
     con1.close();
   }
@@ -147,12 +154,20 @@ public class TestJdbcDriver2 {
 
     stmt.execute("set hive.support.concurrency = false");
 
-    // drop table. ignore error.
-    try {
-      stmt.execute("drop table " + tableName);
-    } catch (Exception ex) {
-      fail(ex.toString());
-    }
+    createTestTables(stmt, "", true);
+    createTestTables(stmt, "testdb.", false);
+  }
+
+  private void createTestTables(Statement stmt, String prefix, boolean loadData)
+      throws SQLException {
+
+    // drop test tables/views
+    dropTestTables(stmt, prefix);
+
+    String tableName = prefix + this.tableName;
+    String partitionedTableName = prefix + this.partitionedTableName;
+    String dataTypeTableName = prefix + this.dataTypeTableName;
+    String viewName = prefix + this.viewName;
 
     // create table
     stmt.execute("create table " + tableName
@@ -160,35 +175,25 @@ public class TestJdbcDriver2 {
         + tableComment + "'");
 
     // load data
-    stmt.execute("load data local inpath '"
-        + dataFilePath.toString() + "' into table " + tableName);
-
-    // also initialize a partitioned table to test against.
-
-    // drop table. ignore error.
-    try {
-      stmt.execute("drop table " + partitionedTableName);
-    } catch (Exception ex) {
-      fail(ex.toString());
+    if (loadData) {
+      stmt.execute("load data local inpath '"
+          + dataFilePath.toString() + "' into table " + tableName);
     }
 
+    // also initialize a partitioned table to test against.
     stmt.execute("create table " + partitionedTableName
         + " (under_col int, value string) comment '"+partitionedTableComment
         +"' partitioned by (" + partitionedColumnName + " STRING)");
 
     // load data
-    stmt.execute("load data local inpath '"
-        + dataFilePath.toString() + "' into table " + partitionedTableName
-        + " PARTITION (" + partitionedColumnName + "="
-        + partitionedColumnValue + ")");
-
-    // drop table. ignore error.
-    try {
-      stmt.execute("drop table " + dataTypeTableName);
-    } catch (Exception ex) {
-      fail(ex.toString());
+    if (loadData) {
+      stmt.execute("load data local inpath '"
+          + dataFilePath.toString() + "' into table " + partitionedTableName
+          + " PARTITION (" + partitionedColumnName + "="
+          + partitionedColumnValue + ")");
     }
 
+    // tables with various types
     stmt.execute("create table " + dataTypeTableName
         + " (c1 int, c2 boolean, c3 double, c4 string,"
         + " c5 array<int>, c6 map<int,string>, c7 map<string,string>,"
@@ -208,15 +213,10 @@ public class TestJdbcDriver2 {
         + ") comment'" + dataTypeTableComment
         +"' partitioned by (dt STRING)");
 
-    stmt.execute("load data local inpath '"
-        + dataTypeDataFilePath.toString() + "' into table " + dataTypeTableName
-        + " PARTITION (dt='20090619')");
-
-    // drop view. ignore error.
-    try {
-      stmt.execute("drop view " + viewName);
-    } catch (Exception ex) {
-      fail(ex.toString());
+    if (loadData) {
+      stmt.execute("load data local inpath '"
+          + dataTypeDataFilePath.toString() + "' into table " + dataTypeTableName
+          + " PARTITION (dt='20090619')");
     }
 
     // create view
@@ -224,6 +224,28 @@ public class TestJdbcDriver2 {
         +"' as select * from "+ tableName);
   }
 
+  // drop test tables/views. ignore error.
+  private void dropTestTables(Statement stmt, String prefix) throws SQLException {
+    String tableName = prefix + this.tableName;
+    String partitionedTableName = prefix + this.partitionedTableName;
+    String dataTypeTableName = prefix + this.dataTypeTableName;
+    String viewName = prefix + this.viewName;
+
+    executeWithIgnore(stmt, "drop table " + tableName);
+    executeWithIgnore(stmt, "drop table " + partitionedTableName);
+    executeWithIgnore(stmt, "drop table " + dataTypeTableName);
+    executeWithIgnore(stmt, "drop view " + viewName);
+  }
+
+  private void executeWithIgnore(Statement stmt, String sql) throws SQLException {
+  // drop table. ignore error.
+    try {
+      stmt.execute(sql);
+    } catch (Exception ex) {
+      fail(ex.toString());
+    }
+  }
+
   private static Connection getConnection(String postfix) throws SQLException {
     Connection con1;
     if (standAloneServer) {
@@ -244,9 +266,8 @@ public class TestJdbcDriver2 {
     // drop table
     Statement stmt = con.createStatement();
     assertNotNull("Statement is null", stmt);
-    stmt.execute("drop table " + tableName);
-    stmt.execute("drop table " + partitionedTableName);
-    stmt.execute("drop table " + dataTypeTableName);
+    dropTestTables(stmt, "");
+    dropTestTables(stmt, "testdb.");
 
     con.close();
     assertTrue("Connection should be closed", con.isClosed());
@@ -1123,25 +1144,77 @@ public class TestJdbcDriver2 {
    * @throws SQLException
    */
   private void getTablesTest(String tableTypeName, String viewTypeName) throws SQLException {
-    Map<String, Object[]> tests = new HashMap<String, Object[]>();
-    tests.put("test%jdbc%", new Object[]{"testhivejdbcdriver_table"
-        , "testhivejdbcdriverpartitionedtable"
-        , "testhivejdbcdriverview"});
-    tests.put("%jdbcdriver\\_table", new Object[]{"testhivejdbcdriver_table"});
-    tests.put("testhivejdbcdriver\\_table", new Object[]{"testhivejdbcdriver_table"});
-    tests.put("test_ivejdbcdri_er\\_table", new Object[]{"testhivejdbcdriver_table"});
-    tests.put("test_ivejdbcdri_er_table", new Object[]{"testhivejdbcdriver_table"});
-    tests.put("test_ivejdbcdri_er%table", new Object[]{
-        "testhivejdbcdriver_table", "testhivejdbcdriverpartitionedtable" });
-    tests.put("%jdbc%", new Object[]{ "testhivejdbcdriver_table"
-        , "testhivejdbcdriverpartitionedtable"
-        , "testhivejdbcdriverview"});
-    tests.put("", new Object[]{});
-
-    for (String checkPattern: tests.keySet()) {
-      ResultSet rs = con.getMetaData().getTables("default", null, checkPattern, null);
+    String[] ALL = null;
+    String[] VIEW_ONLY = {viewTypeName};
+    String[] TABLE_ONLY = {tableTypeName};
+    String[] VIEWORTABLE = {tableTypeName, viewTypeName};
+
+    Map<Object[], String[]> tests = new IdentityHashMap<Object[], String[]>();
+    tests.put(new Object[] { null, "test%jdbc%", ALL}, new String[]{
+        "default.testhivejdbcdriver_table",
+        "default.testhivejdbcdriverpartitionedtable",
+        "default.testhivejdbcdriverview",
+        "testdb.testhivejdbcdriver_table",
+        "testdb.testhivejdbcdriverpartitionedtable",
+        "testdb.testhivejdbcdriverview"});
+    tests.put(new Object[] { "test%", "test%jdbc%", ALL}, new String[]{
+        "testdb.testhivejdbcdriver_table",
+        "testdb.testhivejdbcdriverpartitionedtable",
+        "testdb.testhivejdbcdriverview"});
+    tests.put(new Object[] { "test%", "test%jdbc%", VIEW_ONLY}, new String[]{
+        "testdb.testhivejdbcdriverview"});
+
+    tests.put(new Object[] { null, "%jdbcdriver\\_table", VIEWORTABLE}, new String[]{
+        "default.testhivejdbcdriver_table",
+        "testdb.testhivejdbcdriver_table"});
+    tests.put(new Object[] { "def%", "%jdbcdriver\\_table", VIEWORTABLE}, new String[]{
+        "default.testhivejdbcdriver_table"});
+    tests.put(new Object[] { "def%", "%jdbcdriver\\_table", VIEW_ONLY}, new String[0]);
+
+    tests.put(new Object[] { null, "testhivejdbcdriver\\_table", ALL}, new String[]{
+        "default.testhivejdbcdriver_table",
+        "testdb.testhivejdbcdriver_table"});
+    tests.put(new Object[] { "%faul%", "testhivejdbcdriver\\_table", ALL}, new String[]{
+        "default.testhivejdbcdriver_table"});
+    tests.put(new Object[] { "%faul%", "testhivejdbcdriver\\_table", TABLE_ONLY}, new String[]{
+        "default.testhivejdbcdriver_table"});
+
+    tests.put(new Object[] { null, "test_ivejdbcdri_er\\_table", ALL}, new String[]{
+        "default.testhivejdbcdriver_table",
+        "testdb.testhivejdbcdriver_table"});
+    tests.put(new Object[] { "test__", "test_ivejdbcdri_er\\_table", ALL}, new String[]{
+        "testdb.testhivejdbcdriver_table"});
+
+    tests.put(new Object[] { null, "test_ivejdbcdri_er_table", ALL}, new String[]{
+        "default.testhivejdbcdriver_table",
+        "testdb.testhivejdbcdriver_table"});
+    tests.put(new Object[] { null, "test_ivejdbcdri_er%table", ALL}, new String[]{
+        "default.testhivejdbcdriver_table",
+        "default.testhivejdbcdriverpartitionedtable",
+        "testdb.testhivejdbcdriver_table",
+        "testdb.testhivejdbcdriverpartitionedtable"});
+    tests.put(new Object[] { null, "%jdbc%", ALL}, new String[]{
+        "default.testhivejdbcdriver_table",
+        "default.testhivejdbcdriverpartitionedtable",
+        "default.testhivejdbcdriverview",
+        "testdb.testhivejdbcdriver_table",
+        "testdb.testhivejdbcdriverpartitionedtable",
+        "testdb.testhivejdbcdriverview"});
+    tests.put(new Object[] { "%", "%jdbc%", VIEW_ONLY}, new String[]{
+        "default.testhivejdbcdriverview",
+        "testdb.testhivejdbcdriverview"});
+    tests.put(new Object[] { null, "", ALL}, new String[]{});
+
+    for (Map.Entry<Object[], String[]> entry : tests.entrySet()) {
+      Object[] checkPattern = entry.getKey();
+      String debugString = checkPattern[0] + ", " + checkPattern[1] + ", " +
+          Arrays.toString((String[]) checkPattern[2]);
+
+      Set<String> expectedTables = new HashSet<String>(Arrays.asList(entry.getValue()));
+      ResultSet rs = con.getMetaData().getTables(null,
+          (String)checkPattern[0], (String)checkPattern[1], (String[])checkPattern[2]);
       ResultSetMetaData resMeta = rs.getMetaData();
-      assertEquals(5, resMeta.getColumnCount());
+      assertEquals(10, resMeta.getColumnCount());
       assertEquals("TABLE_CAT", resMeta.getColumnName(1));
       assertEquals("TABLE_SCHEM", resMeta.getColumnName(2));
       assertEquals("TABLE_NAME", resMeta.getColumnName(3));
@@ -1150,9 +1223,11 @@ public class TestJdbcDriver2 {
 
       int cnt = 0;
       while (rs.next()) {
+        String resultDbName = rs.getString("TABLE_SCHEM");
         String resultTableName = rs.getString("TABLE_NAME");
-        assertEquals("Get by index different from get by name.", rs.getString(3), resultTableName);
-        assertEquals("Excpected a different table.", tests.get(checkPattern)[cnt], resultTableName);
+        assertTrue("Invalid table " + resultDbName + "." + resultTableName + " for test " + debugString,
+            expectedTables.contains(resultDbName + "." + resultTableName));
+
         String resultTableComment = rs.getString("REMARKS");
         assertTrue("Missing comment on the table.", resultTableComment.length()>0);
         String tableType = rs.getString("TABLE_TYPE");
@@ -1164,18 +1239,9 @@ public class TestJdbcDriver2 {
         cnt++;
       }
       rs.close();
-      assertEquals("Received an incorrect number of tables.", tests.get(checkPattern).length, cnt);
+      assertEquals("Received an incorrect number of tables for test " + debugString,
+          expectedTables.size(), cnt);
     }
-
-    // only ask for the views.
-    ResultSet rs = con.getMetaData().getTables("default", null, null
-        , new String[]{viewTypeName});
-    int cnt=0;
-    while (rs.next()) {
-      cnt++;
-    }
-    rs.close();
-    assertEquals("Incorrect number of views found.", 1, cnt);
   }
 
   @Test
@@ -1198,6 +1264,8 @@ public class TestJdbcDriver2 {
 
     assertTrue(rs.next());
     assertEquals("default", rs.getString(1));
+    assertTrue(rs.next());
+    assertEquals("testdb", rs.getString(1));
 
     assertFalse(rs.next());
     rs.close();
@@ -1270,7 +1338,7 @@ public class TestJdbcDriver2 {
     tests.put(new String[]{"%jdbcdriver\\_table%", "_%"}, 2);
 
     for (String[] checkPattern: tests.keySet()) {
-      ResultSet rs = con.getMetaData().getColumns(null, null, checkPattern[0],
+      ResultSet rs = con.getMetaData().getColumns(null, "default", checkPattern[0],
           checkPattern[1]);
 
       // validate the metadata for the getColumns result set

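The rewritten test drives the standard JDBC surface end to end. For reference, a minimal client-side sketch of the same call shape (the connection URL and credentials are placeholders for a running HiveServer2):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class GetTablesSketch {
  public static void main(String[] args) throws Exception {
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default", "", "");
    try {
      DatabaseMetaData meta = con.getMetaData();
      // null catalog, schema pattern across databases, table-name pattern, type filter.
      ResultSet rs = meta.getTables(null, "test%", "%jdbc%", new String[] { "VIEW" });
      while (rs.next()) {
        System.out.println(rs.getString("TABLE_SCHEM") + "." + rs.getString("TABLE_NAME")
            + " (" + rs.getString("TABLE_TYPE") + ")");
      }
      rs.close();
    } finally {
      con.close();
    }
  }
}

Note that with the HiveDatabaseMetaData change below, the schema pattern is forwarded even when null, so passing null for schemaPattern now searches across databases rather than only the current one, as the updated test expectations above show.
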
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
index 13e42b5..a73f443 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
@@ -646,9 +646,7 @@ public class HiveDatabaseMetaData implements DatabaseMetaData {
     if (types != null) {
       getTableReq.setTableTypes(Arrays.asList(types));
     }
-    if (schemaPattern != null) {
-      getTableReq.setSchemaName(schemaPattern);
-    }
+    getTableReq.setSchemaName(schemaPattern);
 
     try {
       getTableResp = client.GetTables(getTableReq);

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 98fd42b..bb754f1 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -772,6 +772,13 @@ struct GetAllFunctionsResponse {
   1: optional list<Function> functions
 }
 
+struct TableMeta {
+  1: required string dbName;
+  2: required string tableName;
+  3: required string tableType;
+  4: optional string comments;
+}
+
 exception MetaException {
   1: string message
 }
@@ -890,6 +897,8 @@ service ThriftHiveMetastore extends fb303.FacebookService
       4:EnvironmentContext environment_context)
                        throws(1:NoSuchObjectException o1, 2:MetaException o3)
   list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
+  list<TableMeta> get_table_meta(1: string db_patterns, 2: string tbl_patterns, 3: list<string> tbl_types)
+                       throws (1: MetaException o1)
   list<string> get_all_tables(1: string db_name) throws (1: MetaException o1)
 
   Table get_table(1:string dbname, 2:string tbl_name)

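Client code reaches the new RPC through the metastore client wrapper added alongside this IDL change. A hedged usage sketch, assuming the Java wrapper keeps the thrift signature above ('*' wildcards and '|' alternation in the patterns, table-type names passed as strings):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.TableMeta;

public class GetTableMetaSketch {
  public static void main(String[] args) throws Exception {
    // Assumes metastore connection details come from hive-site.xml on the classpath.
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // One round trip replaces the per-table get_table calls GetTables needed before.
      List<TableMeta> metas = client.getTableMeta(
          "default|testdb", "test*", Arrays.asList("MANAGED_TABLE", "VIRTUAL_VIEW"));
      for (TableMeta meta : metas) {
        System.out.println(meta.getDbName() + "." + meta.getTableName()
            + " [" + meta.getTableType() + "] " + meta.getComments());
      }
    } finally {
      client.close();
    }
  }
}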

[40/55] [abbrv] hive git commit: HIVE-7575 GetTables thrift call is very slow (Navis via Aihua Xu, reviewed by Szehon Ho, Aihua Xu)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index a100e9f..9a1d159 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
@@ -220,6 +221,12 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
+  public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+      throws MetaException {
+    return objectStore.getTableMeta(dbNames, tableNames, tableTypes);
+  }
+
+  @Override
   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
       throws MetaException, UnknownDBException {
     return objectStore.getTableObjectsByName(dbName, tableNames);

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index f6100e6..8dde0af 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
@@ -224,6 +225,12 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
+  public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+      throws MetaException {
+    return Collections.emptyList();
+  }
+
+  @Override
   public List<Table> getTableObjectsByName(String dbname, List<String> tableNames)
       throws MetaException, UnknownDBException {
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 7af9d85..581a919 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -161,11 +162,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     Matcher matcher = pattern.matcher("");
     Set<String> combinedTableNames = new HashSet<String>();
     for (String tableName : tables.keySet()) {
-      if (matcher == null) {
-        matcher = pattern.matcher(tableName);
-      } else {
-        matcher.reset(tableName);
-      }
+      matcher.reset(tableName);
       if (matcher.matches()) {
         combinedTableNames.add(tableName);
       }
@@ -177,6 +174,54 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     Collections.sort(tableNames);
     return tableNames;
   }
+
+  @Override
+  public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+      throws MetaException {
+    List<TableMeta> tableMetas = super.getTableMeta(dbPatterns, tablePatterns, tableTypes);
+    Map<String, Map<String, Table>> tmpTables = getTempTables();
+    if (tmpTables.isEmpty()) {
+      return tableMetas;
+    }
+
+    List<Matcher> dbPatternList = new ArrayList<>();
+    for (String element : dbPatterns.split("\\|")) {
+      dbPatternList.add(Pattern.compile(element.replaceAll("\\*", ".*")).matcher(""));
+    }
+    List<Matcher> tblPatternList = new ArrayList<>();
+    for (String element : tablePatterns.split("\\|")) {
+      tblPatternList.add(Pattern.compile(element.replaceAll("\\*", ".*")).matcher(""));
+    }
+    for (Map.Entry<String, Map<String, Table>> outer : tmpTables.entrySet()) {
+      if (!matchesAny(outer.getKey(), dbPatternList)) {
+        continue;
+      }
+      for (Map.Entry<String, Table> inner : outer.getValue().entrySet()) {
+        Table table = inner.getValue();
+        String tableName = table.getTableName();
+        String typeString = table.getTableType().name();
+        if (tableTypes != null && !tableTypes.contains(typeString)) {
+          continue;
+        }
+        if (!matchesAny(inner.getKey(), tblPatternList)) {
+          continue;
+        }
+        TableMeta tableMeta = new TableMeta(table.getDbName(), tableName, typeString);
+        tableMeta.setComments(table.getProperty("comment"));
+        tableMetas.add(tableMeta);
+      }
+    }
+    return tableMetas;
+  }
+
+  private boolean matchesAny(String string, List<Matcher> matchers) {
+    for (Matcher matcher : matchers) {
+      if (matcher.reset(string).matches()) {
+        return true;
+      }
+    }
+    return matchers.isEmpty();
+  }
 
   @Override
   public List<org.apache.hadoop.hive.metastore.api.Table> getTableObjectsByName(String dbName,
@@ -508,12 +554,16 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
   }
 
   public static Map<String, Table> getTempTablesForDatabase(String dbName) {
+    return getTempTables().get(dbName);
+  }
+
+  public static Map<String, Map<String, Table>> getTempTables() {
     SessionState ss = SessionState.get();
     if (ss == null) {
       LOG.debug("No current SessionState, skipping temp tables");
-      return null;
+      return Collections.emptyMap();
     }
-    return ss.getTempTables().get(dbName);
+    return ss.getTempTables();
   }
 
   private Map<String, ColumnStatisticsObj> getTempTableColumnStatsForTable(String dbName,

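The temp-table branch of getTableMeta above uses the metastore's simple wildcard dialect: a pattern string is split on '|', each '*' is rewritten to the regex '.*' (other regex metacharacters pass through to Pattern.compile unchanged), and a name is kept if any compiled pattern matches; an empty matcher list accepts everything, which is what the matchers.isEmpty() fallthrough encodes. A standalone sketch of that matching logic:

    // Sketch of the wildcard matching used for temp tables above:
    // '|' separates alternatives and '*' means "any characters".
    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class WildcardDemo {
      static List<Matcher> compile(String patterns) {
        List<Matcher> matchers = new ArrayList<>();
        for (String element : patterns.split("\\|")) {
          matchers.add(Pattern.compile(element.replaceAll("\\*", ".*")).matcher(""));
        }
        return matchers;
      }

      static boolean matchesAny(String name, List<Matcher> matchers) {
        for (Matcher matcher : matchers) {
          if (matcher.reset(name).matches()) {
            return true;
          }
        }
        return matchers.isEmpty(); // no patterns at all: accept everything
      }

      public static void main(String[] args) {
        List<Matcher> dbs = compile("default|tmp*");
        System.out.println(matchesAny("tmp_scratch", dbs)); // true
        System.out.println(matchesAny("prod", dbs));        // false
      }
    }
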
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
index 296280f..65bbc1c 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
@@ -22,14 +22,11 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.metadata.TableIterable;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObjectUtils;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.service.cli.FetchOrientation;
 import org.apache.hive.service.cli.HiveSQLException;
 import org.apache.hive.service.cli.OperationState;
@@ -48,7 +45,7 @@ public class GetTablesOperation extends MetadataOperation {
   private final String catalogName;
   private final String schemaName;
   private final String tableName;
-  private final List<String> tableTypes = new ArrayList<String>();
+  private final List<String> tableTypeList;
   private final RowSet rowSet;
   private final TableTypeMapping tableTypeMapping;
 
@@ -58,7 +55,14 @@ public class GetTablesOperation extends MetadataOperation {
   .addStringColumn("TABLE_SCHEM", "Schema name.")
   .addStringColumn("TABLE_NAME", "Table name.")
   .addStringColumn("TABLE_TYPE", "The table type, e.g. \"TABLE\", \"VIEW\", etc.")
-  .addStringColumn("REMARKS", "Comments about the table.");
+  .addStringColumn("REMARKS", "Comments about the table.")
+  .addStringColumn("TYPE_CAT", "The types catalog.")
+  .addStringColumn("TYPE_SCHEM", "The types schema.")
+  .addStringColumn("TYPE_NAME", "Type name.")
+  .addStringColumn("SELF_REFERENCING_COL_NAME", 
+      "Name of the designated \"identifier\" column of a typed table.")
+  .addStringColumn("REF_GENERATION", 
+      "Specifies how values in SELF_REFERENCING_COL_NAME are created.");
 
   protected GetTablesOperation(HiveSession parentSession,
       String catalogName, String schemaName, String tableName,
@@ -72,7 +76,12 @@ public class GetTablesOperation extends MetadataOperation {
     tableTypeMapping =
         TableTypeMappingFactory.getTableTypeMapping(tableMappingStr);
     if (tableTypes != null) {
-      this.tableTypes.addAll(tableTypes);
+      tableTypeList = new ArrayList<String>();
+      for (String tableType : tableTypes) {
+        tableTypeList.add(tableTypeMapping.mapToHiveType(tableType.trim()));
+      }
+    } else {
+      tableTypeList = null;
     }
     this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
   }
@@ -91,23 +100,17 @@ public class GetTablesOperation extends MetadataOperation {
       }
 
       String tablePattern = convertIdentifierPattern(tableName, true);
-      int maxBatchSize = SessionState.get().getConf().getIntVar(ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
 
-      for (String dbName : metastoreClient.getDatabases(schemaPattern)) {
-        List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
-        for (Table table : new TableIterable(metastoreClient, dbName, tableNames, maxBatchSize)) {
-          Object[] rowData = new Object[] {
+      for (TableMeta tableMeta : 
+          metastoreClient.getTableMeta(schemaPattern, tablePattern, tableTypeList)) {
+        rowSet.addRow(new Object[] {
               DEFAULT_HIVE_CATALOG,
-              table.getDbName(),
-              table.getTableName(),
-              tableTypeMapping.mapToClientType(table.getTableType()),
-              table.getParameters().get("comment")
-              };
-          if (tableTypes.isEmpty() || tableTypes.contains(
-                tableTypeMapping.mapToClientType(table.getTableType()))) {
-            rowSet.addRow(rowData);
-          }
-        }
+              tableMeta.getDbName(),
+              tableMeta.getTableName(),
+              tableTypeMapping.mapToClientType(tableMeta.getTableType()),
+              tableMeta.getComments(),
+              null, null, null, null, null
+              });
       }
       setState(OperationState.FINISHED);
     } catch (Exception e) {

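The five columns added to the schema above (TYPE_CAT through REF_GENERATION) make the result shape match what java.sql.DatabaseMetaData.getTables() promises, so generic JDBC tools can consume it; Hive returns NULL for all of them since it has no typed tables. A minimal client-side sketch, assuming the HiveServer2 JDBC driver is on the classpath and using a placeholder connection URL:

    // Sketch: reading the getTables() result set over JDBC. The URL is a
    // placeholder; the column names follow the JDBC spec.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class ListTables {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
                 DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             ResultSet rs = conn.getMetaData()
                 .getTables(null, "default", "%", new String[] {"TABLE", "VIEW"})) {
          while (rs.next()) {
            System.out.printf("%s.%s (%s) - %s%n",
                rs.getString("TABLE_SCHEM"),
                rs.getString("TABLE_NAME"),
                rs.getString("TABLE_TYPE"),
                rs.getString("REMARKS")); // table comment, may be null
          }
        }
      }
    }
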
http://git-wip-us.apache.org/repos/asf/hive/blob/b678ed85/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java b/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
index 4595ef5..285b4f9 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
@@ -18,7 +18,6 @@
 
 package org.apache.hive.service.cli.operation;
 
-import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -95,16 +94,30 @@ public abstract class MetadataOperation extends Operation {
    * other hand is done locally inside the hive code and that requires the regex wildchar
    * format '.*'  This is driven by the datanucleusFormat flag.
    */
-  private String convertPattern(final String pattern, boolean datanucleusFormat) {
+  private String convertPattern(String pattern, boolean datanucleusFormat) {
     String wStr;
     if (datanucleusFormat) {
       wStr = "*";
     } else {
       wStr = ".*";
     }
-    return pattern
-        .replaceAll("([^\\\\])%", "$1" + wStr).replaceAll("\\\\%", "%").replaceAll("^%", wStr)
-        .replaceAll("([^\\\\])_", "$1.").replaceAll("\\\\_", "_").replaceAll("^_", ".");
+    pattern = replaceAll(pattern, "([^\\\\])%", "$1" + wStr);
+    pattern = replaceAll(pattern, "\\\\%", "%");
+    pattern = replaceAll(pattern, "^%", wStr);
+    pattern = replaceAll(pattern, "([^\\\\])_", "$1.");
+    pattern = replaceAll(pattern, "\\\\_", "_");
+    pattern = replaceAll(pattern, "^_", ".");
+    return pattern;
+  }
+
+  private String replaceAll(String input, final String pattern, final String replace) {
+    while (true) {
+      String replaced = input.replaceAll(pattern, replace);
+      if (replaced.equals(input)) {
+        return replaced;
+      }
+      input = replaced;
+    }
   }
 
   protected boolean isAuthV2Enabled(){

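The replaceAll loop above is needed because a single pass of String.replaceAll cannot rewrite overlapping matches: the pattern ([^\\])_ consumes the character before each unescaped '_', so in a run such as "a__b" the scan resumes after the first match and the second underscore is never seen. Iterating until the output stops changing reaches the fixpoint. A small standalone check of that behavior:

    // Demo: why one replaceAll pass is not enough for adjacent wildcards.
    public class FixpointDemo {
      static String replaceAll(String input, String pattern, String replace) {
        while (true) {
          String replaced = input.replaceAll(pattern, replace);
          if (replaced.equals(input)) {
            return replaced;
          }
          input = replaced;
        }
      }

      public static void main(String[] args) {
        String p = "a__b"; // two adjacent SQL '_' wildcards
        System.out.println(p.replaceAll("([^\\\\])_", "$1."));  // a._b (second '_' missed)
        System.out.println(replaceAll(p, "([^\\\\])_", "$1.")); // a..b (fixpoint)
      }
    }
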

[03/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by xu...@apache.org.
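
The golden-file churn in this and the following messages is mechanical: after HIVE-12063, decimal values are printed padded with trailing zeros out to the declared scale of the column, so a decimal(14,2) value 9 now renders as 9.00 and a decimal(20,10) value carries ten fractional digits. A minimal sketch of the padding rule using java.math.BigDecimal (the column scales here are illustrative):

    // Sketch: trailing-zero padding to the column scale, per HIVE-12063.
    import java.math.BigDecimal;

    public class PadDemo {
      public static void main(String[] args) {
        // decimal(14,2): 9 -> 9.00
        System.out.println(new BigDecimal("9").setScale(2).toPlainString());
        // decimal(20,10): pad the stored digits out to scale 10
        System.out.println(
            new BigDecimal("5831542.269248378").setScale(10).toPlainString());
        // prints 9.00 and 5831542.2692483780
      }
    }
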
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index 04eaaa1..bcf5944 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -117,14 +117,14 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	6	6984454.211097692	-4033.445769230769	6967702.8672438458471
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.746153846154	-11712.276923076923	12625.04759999997746
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	3	6984454.211097692	-617.5607769230769	6983219.0895438458462
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2	6984454.211097692	1833.9456923076925	6986288.1567899996925
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 PREHOOK: query: -- Now add the others...
 EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
@@ -221,11 +221,11 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.211097692	-4033.445769230769	6967702.8672438458471	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	-1941.6364148649	1426.0153418918999	2016.6902366556308	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459	-2325.50327307692295	1707.9424961538462	2415.395441814127
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.746153846154	-11712.276923076923	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.211097692	-617.5607769230769	6983219.0895438458462	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2916536.7443268917	2915005.5249214866	4122440.3477364695	2	6984454.211097692	1833.9456923076925	6986288.1567899996925	3493144.07839499984625	3491310.1327026924	4937458.140118758
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536	1956.576923076922966667	6821.495748565159	6822.606289190924
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	-1941.63641486490000	1426.0153418918999	2016.6902366556308	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590	-2325.503273076922950000	1707.9424961538462	2415.395441814127
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2916536.74432689170000	2915005.5249214866	4122440.3477364695	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250	3493144.078394999846250000	3491310.1327026924	4937458.140118758
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360	1956.576923076922966667	6821.495748565159	6822.606289190924

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
index e9fc3f8..d138102 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
@@ -161,109 +161,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/sum_expr_with_order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sum_expr_with_order.q.out b/ql/src/test/results/clientpositive/sum_expr_with_order.q.out
index 00318e8..5e00930 100644
--- a/ql/src/test/results/clientpositive/sum_expr_with_order.q.out
+++ b/ql/src/test/results/clientpositive/sum_expr_with_order.q.out
@@ -12,4 +12,4 @@ order by c1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-13009100
+13009100.000

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
index 4b39b2c..69fab90 100644
--- a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
@@ -1290,105 +1290,105 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_mapjoin
 #### A masked pattern was here ####
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	6984454.211097692
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	6984454.21109769200000
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1478,105 +1478,105 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_mapjoin
 #### A masked pattern was here ####
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	6984454.211097692
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	6984454.21109769200000
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
 PREHOOK: query: DROP TABLE decimal_mapjoin
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_mapjoin

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
index 7ca537d..4b15062 100644
--- a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
@@ -166,112 +166,112 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -282,109 +282,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/update_all_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/update_all_types.q.out b/ql/src/test/results/clientpositive/tez/update_all_types.q.out
index 1cfa088..c5c1abb 100644
--- a/ql/src/test/results/clientpositive/tez/update_all_types.q.out
+++ b/ql/src/test/results/clientpositive/tez/update_all_types.q.out
@@ -96,11 +96,11 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+-51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-741	-1070883071	-1070883071	-1645852809	NULL	-741.0	NULL	NULL	1969-12-31	0ruyd6Y50JpdGRf6HqD	0ruyd6Y50JpdGRf6HqD	xH7445Rals48VOulSyR5F               	false
@@ -150,12 +150,12 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+-51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
 1	2	-1070883071	3	4	3.14	6.28	5.99	NULL	2014-09-01	its a beautiful day in the neighbhorhood	a beautiful day for a neighbor	wont you be mine                    	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-947	-1070551679	-1070551679	1864027286	NULL	-947.0	NULL	NULL	1969-12-31	iUR3Q	iUR3Q	4KWs6gw7lv2WYd66P                   	false
@@ -184,12 +184,12 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--102	-51	-1071480828	-1071480828	-1401575336	-51.0	-51.0	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+-102	-51	-1071480828	-1071480828	-1401575336	-51.0	-51.0	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
 1	2	-1070883071	3	4	3.14	6.28	5.99	NULL	2014-09-01	its a beautiful day in the neighbhorhood	a beautiful day for a neighbor	wont you be mine                    	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-947	-1070551679	-1070551679	1864027286	NULL	-947.0	NULL	NULL	1969-12-31	iUR3Q	iUR3Q	4KWs6gw7lv2WYd66P                   	false

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
index d6a8517..0be71b1 100644
--- a/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
@@ -170,4 +170,4 @@ select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2korc
 #### A masked pattern was here ####
--4997414117561.546875	4994550248722.298828	-10252745435816.02441	-5399023399.587163986308583465
+-4997414117561.546875000000000000	4994550248722.298828000000000000	-10252745435816.024410000000000000	-5399023399.587163986308583465

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
index 1d720c0..11c3d71 100644
--- a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
@@ -650,34 +650,34 @@ POSTHOOK: Input: default@decimal_date_test
 -18.5162162162
 -17.3216216216
 -16.7243243243
--16.127027027
+-16.1270270270
 -15.5297297297
 -10.7513513514
 -9.5567567568
 -8.3621621622
--5.972972973
+-5.9729729730
 -3.5837837838
 4.1810810811
 4.7783783784
 4.7783783784
 5.3756756757
-5.972972973
-5.972972973
+5.9729729730
+5.9729729730
 11.3486486486
 11.3486486486
 11.9459459459
 14.9324324324
 19.1135135135
 20.3081081081
-22.1
+22.1000000000
 24.4891891892
 33.4486486486
 34.6432432432
 40.0189189189
 42.4081081081
 43.0054054054
-44.2
-44.2
+44.2000000000
+44.2000000000
 44.7972972973
 45.9918918919
 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
index 331edd0..12920d2 100644
--- a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
@@ -204,13 +204,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
index 9474c2c..8a21697 100644
--- a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
@@ -159,7 +159,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education
@@ -252,7 +252,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
index fc37e0d..3de006c 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
@@ -1184,7 +1184,7 @@ POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1355944339.1234567
+1355944339.12345670
 PREHOOK: query: explain
 select cast(true as decimal) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1588,7 +1588,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1
+1.0000000000000000000
 PREHOOK: query: explain
 select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
index 75f872e..eea91bb 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
@@ -47,43 +47,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -92,43 +92,43 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-1234567890.12345678	1234567890
-200	200
-125.2	125
-124	124
-100	100
-20	20
-10	10
-3.14	4
-3.14	3
-3.14	3
-3.14	3
-2	2
-2	2
-1.122	1
-1.12	1
-1	1
-1	1
-1	1
-0.333	0
-0.33	0
-0.3	0
-0.2	0
-0.1	0
-0.02	0
-0.01	0
-0	0
-0	0
-0	0
--0.3	0
--0.33	0
--0.333	0
--1.12	-1
--1.12	-1
--1.122	-11
--1255.49	-1255
--4400	4400
--1234567890.123456789	-1234567890
+1234567890.123456780000000000	1234567890
+200.000000000000000000	200
+125.200000000000000000	125
+124.000000000000000000	124
+100.000000000000000000	100
+20.000000000000000000	20
+10.000000000000000000	10
+3.140000000000000000	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+2.000000000000000000	2
+2.000000000000000000	2
+1.122000000000000000	1
+1.120000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+0.333000000000000000	0
+0.330000000000000000	0
+0.300000000000000000	0
+0.200000000000000000	0
+0.100000000000000000	0
+0.020000000000000000	0
+0.010000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+-0.300000000000000000	0
+-0.330000000000000000	0
+-0.333000000000000000	0
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-1.122000000000000000	-11
+-1255.490000000000000000	-1255
+-4400.000000000000000000	4400
+-1234567890.123456789000000000	-1234567890
 NULL	0
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -139,43 +139,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -185,34 +185,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL
--1234567890.123456789
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
-1234567890.12345678
+-1234567890.123456789000000000
+-4400.000000000000000000
+-1255.490000000000000000
+-1.122000000000000000
+-1.120000000000000000
+-0.333000000000000000
+-0.330000000000000000
+-0.300000000000000000
+0.000000000000000000
+0.010000000000000000
+0.020000000000000000
+0.100000000000000000
+0.200000000000000000
+0.300000000000000000
+0.330000000000000000
+0.333000000000000000
+1.000000000000000000
+1.120000000000000000
+1.122000000000000000
+2.000000000000000000
+3.140000000000000000
+10.000000000000000000
+20.000000000000000000
+100.000000000000000000
+124.000000000000000000
+125.200000000000000000
+200.000000000000000000
+1234567890.123456780000000000
 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -222,34 +222,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-2
--0.333	0
--0.33	0
--0.3	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	3
-1.12	1
-1.122	1
-2	4
-3.14	13
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-2
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	3
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	4
+3.140000000000000000	13
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -258,23 +258,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789
--1255	-1255.49
--11	-1.122
--1	-2.24
-0	0.33
-1	5.242
-2	4
-3	9.42
-4	3.14
-10	10
-20	20
-100	100
-124	124
-125	125.2
-200	200
-4400	-4400
-1234567890	1234567890.12345678
+-1234567890	-1234567890.123456789000000000
+-1255	-1255.490000000000000000
+-11	-1.122000000000000000
+-1	-2.240000000000000000
+0	0.330000000000000000
+1	5.242000000000000000
+2	4.000000000000000000
+3	9.420000000000000000
+4	3.140000000000000000
+10	10.000000000000000000
+20	20.000000000000000000
+100	100.000000000000000000
+124	124.000000000000000000
+125	125.200000000000000000
+200	200.000000000000000000
+4400	-4400.000000000000000000
+1234567890	1234567890.123456780000000000
 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -283,71 +283,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -356,10 +356,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -368,10 +368,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: DROP TABLE DECIMAL_3_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_3_txt

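A note for readers tracing these golden-file updates: the key column of
DECIMAL_3 evidently prints at scale 18 (consistent with a decimal(38,18)
type), and the change here pads every value with trailing zeros out to
that declared scale. A minimal sketch of the display rule, using
java.math.BigDecimal as a neutral stand-in for Hive's decimal formatting
(this is not the patch code):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class PadToScale {
        // Widening the scale never rounds, it only appends trailing zeros,
        // so RoundingMode.UNNECESSARY is safe here.
        static String padToScale(BigDecimal v, int columnScale) {
            return v.setScale(columnScale, RoundingMode.UNNECESSARY).toPlainString();
        }

        public static void main(String[] args) {
            System.out.println(padToScale(new BigDecimal("3.14"), 18));
            // -> 3.140000000000000000, as in the WHERE key=3.14 hunks above
            System.out.println(padToScale(new BigDecimal("-1255.49"), 18));
            // -> -1255.490000000000000000
        }
    }
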
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out
index 613f5a8..c7d3d9e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out
@@ -57,43 +57,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_1
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
+-1234567890.1234567890000000000000000	-1234567890
+-4400.0000000000000000000000000	4400
+-1255.4900000000000000000000000	-1255
+-1.1220000000000000000000000	-11
+-1.1200000000000000000000000	-1
+-1.1200000000000000000000000	-1
+-0.3330000000000000000000000	0
+-0.3300000000000000000000000	0
+-0.3000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0100000000000000000000000	0
+0.0200000000000000000000000	0
+0.1000000000000000000000000	0
+0.2000000000000000000000000	0
+0.3000000000000000000000000	0
+0.3300000000000000000000000	0
+0.3330000000000000000000000	0
 0.9999999999999999999999999	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+1.0000000000000000000000000	1
+1.0000000000000000000000000	1
+1.1200000000000000000000000	1
+1.1220000000000000000000000	1
+2.0000000000000000000000000	2
+2.0000000000000000000000000	2
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	4
+10.0000000000000000000000000	10
+20.0000000000000000000000000	20
+100.0000000000000000000000000	100
+124.0000000000000000000000000	124
+125.2000000000000000000000000	125
+200.0000000000000000000000000	200
+1234567890.1234567800000000000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -103,43 +103,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -149,43 +149,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -195,43 +195,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: DROP TABLE DECIMAL_4_1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_4_1

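The decimal_4_2 rows above are consistent with value = key * 3 stored at
scale 25 (an inference from the data, not from the test source). A short
sketch of how the padded pair on the last row of each hunk comes about:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class TimesThree {
        public static void main(String[] args) {
            BigDecimal key = new BigDecimal("1234567890.12345678");
            // Multiplying by 3 yields a scale-8 result; padding both sides to
            // the displayed scale of 25 reproduces the golden output exactly.
            BigDecimal value = key.multiply(BigDecimal.valueOf(3));
            System.out.println(key.setScale(25, RoundingMode.UNNECESSARY).toPlainString());
            // -> 1234567890.1234567800000000000000000
            System.out.println(value.setScale(25, RoundingMode.UNNECESSARY).toPlainString());
            // -> 3703703670.3703703400000000000000000
        }
    }

Note how 0.9999999999999999999999999 is left alone in all three hunks: it
already carries the full 25-digit scale, so there is nothing to pad.
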
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out
index 34c3351..0bfd12e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out
@@ -59,41 +59,41 @@ POSTHOOK: Input: default@decimal_5
 NULL
 NULL
 NULL
--4400
--1255.49
--1.122
--1.12
--1.12
--0.333
--0.33
--0.3
-0
-0
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1
-1
-1.12
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.00000
+1.00000
+1.12000
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -103,32 +103,32 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.12000
+1.12200
+2.00000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -185,40 +185,40 @@ POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
+0.000
+0.000
+100.000
+10.000
+1.000
+0.100
+0.010
+200.000
+20.000
+2.000
+0.000
+0.200
+0.020
+0.300
+0.330
 0.333
--0.3
--0.33
+-0.300
+-0.330
 -0.333
-1
-2
-3.14
--1.12
--1.12
+1.000
+2.000
+3.140
+-1.120
+-1.120
 -1.122
-1.12
+1.120
 1.122
-124
-125.2
+124.000
+125.200
 NULL
-3.14
-3.14
-3.14
-1
+3.140
+3.140
+3.140
+1.000
 NULL
 NULL
 PREHOOK: query: DROP TABLE DECIMAL_5_txt

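The last hunk above shows values narrowed to scale 3: the conversion rounds
where needed, and the result then prints at exactly the target scale. A
sketch, assuming HALF_UP rounding (Hive's usual choice for decimals); the
second example mirrors what vector_decimal_6 below does when a scale-5
value lands in a scale-4 column:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class NarrowScale {
        public static void main(String[] args) {
            // No rounding needed here, only padding to the target scale.
            System.out.println(new BigDecimal("125.2")
                    .setScale(3, RoundingMode.HALF_UP).toPlainString());   // 125.200
            // Here a digit is actually dropped by rounding.
            System.out.println(new BigDecimal("10.73433")
                    .setScale(4, RoundingMode.HALF_UP).toPlainString());   // 10.7343
        }
    }
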
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
index 9cdd7fc..e0ccbc6 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
@@ -119,27 +119,27 @@ NULL	0
 NULL	3
 NULL	4
 NULL	1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--0.333	0
--0.3	0
-0	0
-0	0
-0.333	0
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-3.14	3
-3.14	3
-3.14	4
-10	10
+-4400.00000	4400
+-1255.49000	-1255
+-1.12200	-11
+-1.12000	-1
+-0.33300	0
+-0.30000	0
+0.00000	0
+0.00000	0
+0.33300	0
+1.00000	1
+1.00000	1
+1.12000	1
+1.12200	1
+2.00000	2
+3.14000	3
+3.14000	3
+3.14000	4
+10.00000	10
 10.73433	5
-124	124
-125.2	125
+124.00000	124
+125.20000	125
 23232.23435	2
 PREHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -151,27 +151,27 @@ POSTHOOK: Input: default@decimal_6_2
 #### A masked pattern was here ####
 NULL	0
 -1234567890.1235	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--0.333	0
--0.3	0
-0	0
-0	0
-0.333	0
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-3.14	3
-3.14	3
-3.14	4
-10	10
+-4400.0000	4400
+-1255.4900	-1255
+-1.1220	-11
+-1.1200	-1
+-0.3330	0
+-0.3000	0
+0.0000	0
+0.0000	0
+0.3330	0
+1.0000	1
+1.0000	1
+1.1200	1
+1.1220	1
+2.0000	2
+3.1400	3
+3.1400	3
+3.1400	4
+10.0000	10
 10.7343	5
-124	124
-125.2	125
+124.0000	124
+125.2000	125
 23232.2344	2
 2389432.2375	3
 2389432.2375	4
@@ -200,54 +200,54 @@ NULL
 NULL
 NULL
 NULL
--1234567890.1235
--4400
--4400
--1255.49
--1255.49
--1.122
--1.122
--1.12
--1.12
--0.333
--0.333
--0.3
--0.3
-0
-0
-0
-0
-0.333
-0.333
-1
-1
-1
-1
-1.12
-1.12
-1.122
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-3.14
-3.14
-10
-10
-10.7343
+-1234567890.12350
+-4400.00000
+-4400.00000
+-1255.49000
+-1255.49000
+-1.12200
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33300
+-0.30000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.00000
+0.33300
+0.33300
+1.00000
+1.00000
+1.00000
+1.00000
+1.12000
+1.12000
+1.12200
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+10.00000
+10.73430
 10.73433
-124
-124
-125.2
-125.2
+124.00000
+124.00000
+125.20000
+125.20000
 23232.23435
-23232.2344
-2389432.2375
-2389432.2375
-1234567890.1235
+23232.23440
+2389432.23750
+2389432.23750
+1234567890.12350
 PREHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@decimal_6_1

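In the union-style hunk above, the scale-4 values from DECIMAL_6_2 (e.g.
23232.2344) are padded to scale 5 alongside DECIMAL_6_1's native scale-5
values (23232.23435), which is consistent with the common decimal type of
the two branches taking the larger scale. A sketch of that reading (the
common-type rule here is inferred from the output, not taken from the
type-resolution code):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class CommonScale {
        public static void main(String[] args) {
            int commonScale = Math.max(5, 4);  // DECIMAL_6_1 vs DECIMAL_6_2
            System.out.println(new BigDecimal("23232.2344")
                    .setScale(commonScale, RoundingMode.UNNECESSARY).toPlainString());
            // -> 23232.23440, printed next to the unpadded 23232.23435
        }
    }
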
http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
index 683af86..d10f053 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
@@ -117,14 +117,14 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	6	6984454.211097692	-4033.445769230769	6967702.8672438458471
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.746153846154	-11712.276923076923	12625.04759999997746
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	3	6984454.211097692	-617.5607769230769	6983219.0895438458462
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2	6984454.211097692	1833.9456923076925	6986288.1567899996925
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 PREHOOK: query: -- Now add the others...
 EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
@@ -221,11 +221,11 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.211097692	-4033.445769230769	6967702.8672438458471	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	-1941.6364148649	1426.0153418918999	2016.6902366556308	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459	-2325.50327307692295	1707.9424961538462	2415.395441814127
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.746153846154	-11712.276923076923	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.211097692	-617.5607769230769	6983219.0895438458462	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2916536.7443268917	2915005.5249214866	4122440.3477364695	2	6984454.211097692	1833.9456923076925	6986288.1567899996925	3493144.07839499984625	3491310.1327026924	4937458.140118758
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536	1956.576923076922966667	6821.495748565159	6822.606289190924
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	-1941.63641486490000	1426.0153418918999	2016.6902366556308	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590	-2325.503273076922950000	1707.9424961538462	2415.395441814127
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2916536.74432689170000	2915005.5249214866	4122440.3477364695	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250	3493144.078394999846250000	3491310.1327026924	4937458.140118758
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360	1956.576923076922966667	6821.495748565159	6822.606289190924

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
index 59b80f2..35b7e87 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
@@ -29,13 +29,13 @@ POSTHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS D
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--13326.0	528534767	true	1969-12-31 15:59:46.674	-13326	528534767	1	-13
--15813.0	528534767	true	1969-12-31 15:59:55.787	-15813	528534767	1	-4
--9566.0	528534767	true	1969-12-31 15:59:44.187	-9566	528534767	1	-16
-15007.0	528534767	true	1969-12-31 15:59:50.434	15007	528534767	1	-10
-7021.0	528534767	true	1969-12-31 16:00:15.007	7021	528534767	1	15
-4963.0	528534767	true	1969-12-31 16:00:07.021	4963	528534767	1	7
--7824.0	528534767	true	1969-12-31 16:00:04.963	-7824	528534767	1	5
--15431.0	528534767	true	1969-12-31 15:59:52.176	-15431	528534767	1	-8
--15549.0	528534767	true	1969-12-31 15:59:44.569	-15549	528534767	1	-15
-5780.0	528534767	true	1969-12-31 15:59:44.451	5780	528534767	1	-16
+-13326.0	528534767	true	1969-12-31 15:59:46.674	-13326.0000000000	528534767.00000000000000	1.00	-13
+-15813.0	528534767	true	1969-12-31 15:59:55.787	-15813.0000000000	528534767.00000000000000	1.00	-4
+-9566.0	528534767	true	1969-12-31 15:59:44.187	-9566.0000000000	528534767.00000000000000	1.00	-16
+15007.0	528534767	true	1969-12-31 15:59:50.434	15007.0000000000	528534767.00000000000000	1.00	-10
+7021.0	528534767	true	1969-12-31 16:00:15.007	7021.0000000000	528534767.00000000000000	1.00	15
+4963.0	528534767	true	1969-12-31 16:00:07.021	4963.0000000000	528534767.00000000000000	1.00	7
+-7824.0	528534767	true	1969-12-31 16:00:04.963	-7824.0000000000	528534767.00000000000000	1.00	5
+-15431.0	528534767	true	1969-12-31 15:59:52.176	-15431.0000000000	528534767.00000000000000	1.00	-8
+-15549.0	528534767	true	1969-12-31 15:59:44.569	-15549.0000000000	528534767.00000000000000	1.00	-15
+5780.0	528534767	true	1969-12-31 15:59:44.451	5780.0000000000	528534767.00000000000000	1.00	-16

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
index 366d883..08c3ae9 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
@@ -84,13 +84,13 @@ LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_test
 #### A masked pattern was here ####
-1836.441995841977	-1166.027234927254	0.8372697814833714	245972.55810810256	5.6189189189	835	1000	-24	835	true	1000.823076923077	835.6189	1000.823076923077	1969-12-31 16:13:55.618918918
-1856.1322245322462	-1178.5293139292924	0.8372449787014038	251275.4432432497	4.5783783784	844	1011	-13	844	true	1011.5538461538462	844.57837	1011.5538461538462	1969-12-31 16:14:04.578378378
-1858.7575883576155	-1180.196257796231	0.837241711366943	251986.76756757565	5.772972973	845	1012	-12	845	true	1012.9846153846155	845.77295	1012.9846153846155	1969-12-31 16:14:05.772972973
-1862.6956340956693	-1182.6966735966386	0.8372368276344616	253055.6391891997	7.5648648649	847	1015	-9	847	true	1015.1307692307693	847.5649	1015.1307692307693	1969-12-31 16:14:07.564864864
-1883.6985446985233	-1196.0322245322466	0.8372111259286499	258794.49324323673	7.1216216216	857	1026	2	857	true	1026.5769230769233	857.12164	1026.5769230769233	1969-12-31 16:14:17.121621621
-1886.3239085238924	-1197.6991683991848	0.8372079534581902	259516.37432431948	8.3162162162	858	1028	4	858	true	1028.0076923076924	858.3162	1028.0076923076924	1969-12-31 16:14:18.316216216
-1887.636590436577	-1198.532640332654	0.8372063705322131	259877.69189188787	8.9135135135	858	1028	4	858	true	1028.723076923077	858.9135	1028.723076923077	1969-12-31 16:14:18.913513513
-1895.5126819126846	-1203.5334719334692	0.8371969190171343	262050.87567567648	2.4972972973	862	1033	9	862	true	1033.0153846153846	862.4973	1033.0153846153846	1969-12-31 16:14:22.497297297
-1909.9521829522155	-1212.701663201631	0.8371797936946236	266058.54729730723	9.0675675676	869	1040	16	869	true	1040.8846153846155	869.06757	1040.8846153846155	1969-12-31 16:14:29.067567567
-1913.8902286902692	-1215.2020790020384	0.8371751679995797	267156.8270270395	0.8594594595	870	1043	19	870	true	1043.0307692307692	870.85944	1043.0307692307692	1969-12-31 16:14:30.859459459
+1836.44199584197700	-1166.02723492725400	0.8372697814833714	245972.55810810256	5.6189189189	835	1000	-24	835	true	1000.823076923077	835.6189	1000.823076923077	1969-12-31 16:13:55.618918918
+1856.13222453224620	-1178.52931392929240	0.8372449787014038	251275.4432432497	4.5783783784	844	1011	-13	844	true	1011.5538461538462	844.57837	1011.5538461538462	1969-12-31 16:14:04.578378378
+1858.75758835761550	-1180.19625779623100	0.837241711366943	251986.76756757565	5.7729729730	845	1012	-12	845	true	1012.9846153846155	845.77295	1012.9846153846155	1969-12-31 16:14:05.772972973
+1862.69563409566930	-1182.69667359663860	0.8372368276344616	253055.6391891997	7.5648648649	847	1015	-9	847	true	1015.1307692307693	847.5649	1015.1307692307693	1969-12-31 16:14:07.564864864
+1883.69854469852330	-1196.03222453224660	0.8372111259286499	258794.49324323673	7.1216216216	857	1026	2	857	true	1026.5769230769233	857.12164	1026.5769230769233	1969-12-31 16:14:17.121621621
+1886.32390852389240	-1197.69916839918480	0.8372079534581902	259516.37432431948	8.3162162162	858	1028	4	858	true	1028.0076923076924	858.3162	1028.0076923076924	1969-12-31 16:14:18.316216216
+1887.63659043657700	-1198.53264033265400	0.8372063705322131	259877.69189188787	8.9135135135	858	1028	4	858	true	1028.723076923077	858.9135	1028.723076923077	1969-12-31 16:14:18.913513513
+1895.51268191268460	-1203.53347193346920	0.8371969190171343	262050.87567567648	2.4972972973	862	1033	9	862	true	1033.0153846153846	862.4973	1033.0153846153846	1969-12-31 16:14:22.497297297
+1909.95218295221550	-1212.70166320163100	0.8371797936946236	266058.54729730723	9.0675675676	869	1040	16	869	true	1040.8846153846155	869.06757	1040.8846153846155	1969-12-31 16:14:29.067567567
+1913.89022869026920	-1215.20207900203840	0.8371751679995797	267156.8270270395	0.8594594595	870	1043	19	870	true	1043.0307692307692	870.85944	1043.0307692307692	1969-12-31 16:14:30.859459459

http://git-wip-us.apache.org/repos/asf/hive/blob/13f8cfec/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
index 240b875..3712549 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
@@ -156,109 +156,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

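One thing the mapjoin output above makes explicit: the padding is purely a
display-level change. The scale-2 keys (14.00, 17.00, ...) still join
against the unscaled values on the other side, so key matching compares
numeric values, not formatted strings. A reminder sketch in plain
BigDecimal terms (not Hive's join code):

    import java.math.BigDecimal;

    public class DecimalJoinKey {
        public static void main(String[] args) {
            BigDecimal a = new BigDecimal("14.00");  // scale 2, as displayed above
            BigDecimal b = new BigDecimal("14");     // scale 0
            System.out.println(a.equals(b));          // false: equals() is scale-sensitive
            System.out.println(a.compareTo(b) == 0);  // true: value comparison ignores scale
        }
    }
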

[33/55] [abbrv] hive git commit: HIVE-12232 : BucketingSortingReduceSinkOptimizer throws IOB exception for duplicate columns

Posted by xu...@apache.org.
HIVE-12232 : BucketingSortingReduceSinkOptimizer throws IOB exception for duplicate columns

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1efb92a8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1efb92a8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1efb92a8

Branch: refs/heads/spark
Commit: 1efb92a8e5ba5aa662fc26a8bcaa5c972c18f7c6
Parents: ab7794c
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Nov 5 18:25:24 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sun Nov 8 17:03:54 2015 -0800

----------------------------------------------------------------------
 .../BucketingSortingReduceSinkOptimizer.java    |  6 ++
 .../clientpositive/insertoverwrite_bucket.q     |  9 +++
 .../clientpositive/insertoverwrite_bucket.q.out | 78 ++++++++++++++++++++
 3 files changed, 93 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1efb92a8/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index a090a5b..d5df34c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -588,6 +588,12 @@ public class BucketingSortingReduceSinkOptimizer implements Transform {
             }
             // Only columns can be selected for both sorted and bucketed positions
             for (int pos : bucketPositions) {
+              if (pos >= selectDesc.getColList().size()) {
+                // e.g., INSERT OVERWRITE TABLE temp1 SELECT c0, c0 FROM temp2;
+                // In such a case the Select Operator has only one instance of c0 while the RS has two,
+                // so looking up the bucket column by position would run past the end of the list. Bail out.
+                return null;
+              }
               ExprNodeDesc selectColList = selectDesc.getColList().get(pos);
               if (!(selectColList instanceof ExprNodeColumnDesc)) {
                 return null;

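For context on the guard added above: with a statement like
INSERT OVERWRITE TABLE temp1 SELECT c0, c0 FROM temp2, the Select Operator
carries a single instance of the column while the ReduceSink tracks two
bucket/sort positions, so indexing the select column list by RS position
can run past the end. A toy sketch of that mismatch (illustrative values,
not the optimizer's actual data structures):

    import java.util.Arrays;
    import java.util.List;

    public class DuplicateBucketCols {
        public static void main(String[] args) {
            // SELECT c0, c0: one select column...
            List<String> selectColList = Arrays.asList("c0");
            // ...but two bucket positions on the ReduceSink side.
            int[] bucketPositions = {0, 1};
            for (int pos : bucketPositions) {
                if (pos >= selectColList.size()) {
                    // This is the new bail-out; without it, get(pos) below
                    // would throw IndexOutOfBoundsException.
                    System.out.println("pos " + pos + " out of bounds -> bail out");
                    continue;
                }
                System.out.println("pos " + pos + " -> " + selectColList.get(pos));
            }
        }
    }
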
http://git-wip-us.apache.org/repos/asf/hive/blob/1efb92a8/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q b/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
index d939710..5a10f94 100644
--- a/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
+++ b/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
@@ -18,10 +18,19 @@ insert into table bucketinput values ("firstinsert3");
 set hive.enforce.bucketing = true; 
 set hive.enforce.sorting=true;
 insert overwrite table bucketoutput1 select * from bucketinput where data like 'first%'; 
+CREATE TABLE temp1
+(
+    change string,
+    num string
+)
+CLUSTERED BY (num) SORTED BY (num) INTO 4 BUCKETS;
+explain insert overwrite table temp1 select data, data from bucketinput;
+
 set hive.auto.convert.sortmerge.join=true; 
 set hive.optimize.bucketmapjoin = true; 
 set hive.optimize.bucketmapjoin.sortedmerge = true; 
 select * from bucketoutput1 a join bucketoutput2 b on (a.data=b.data);
+drop table temp1;
 drop table buckettestinput;
 drop table buckettestoutput1;
 drop table buckettestoutput2;

http://git-wip-us.apache.org/repos/asf/hive/blob/1efb92a8/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out b/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out
index 9b7b85d..4add20c 100644
--- a/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out
+++ b/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out
@@ -80,6 +80,76 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketinput
 POSTHOOK: Output: default@bucketoutput1
 POSTHOOK: Lineage: bucketoutput1.data SIMPLE [(bucketinput)bucketinput.FieldSchema(name:data, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE temp1
+(
+    change string,
+    num string
+)
+CLUSTERED BY (num) SORTED BY (num) INTO 4 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@temp1
+POSTHOOK: query: CREATE TABLE temp1
+(
+    change string,
+    num string
+)
+CLUSTERED BY (num) SORTED BY (num) INTO 4 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@temp1
+PREHOOK: query: explain insert overwrite table temp1 select data, data from bucketinput
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table temp1 select data, data from bucketinput
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: bucketinput
+            Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: data (type: string)
+              outputColumnNames: _col1
+              Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col1 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col1 (type: string)
+                Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.temp1
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.temp1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
 PREHOOK: query: select * from bucketoutput1 a join bucketoutput2 b on (a.data=b.data)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketoutput1
@@ -90,6 +160,14 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketoutput1
 POSTHOOK: Input: default@bucketoutput2
 #### A masked pattern was here ####
+PREHOOK: query: drop table temp1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@temp1
+PREHOOK: Output: default@temp1
+POSTHOOK: query: drop table temp1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@temp1
+POSTHOOK: Output: default@temp1
 PREHOOK: query: drop table buckettestinput
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table buckettestinput


[35/55] [abbrv] hive git commit: HIVE-12311 : explain CTAS fails if the table already exists (Gunther Hagleitner via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-12311 : explain CTAS fails if the table already exists (Gunther Hagleitner via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c29a685f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c29a685f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c29a685f

Branch: refs/heads/spark
Commit: c29a685f230955bd39c16e57a540c7919b5a9b1b
Parents: 9234712
Author: Gunther Hagleitner <gu...@apache.org>
Authored: Mon Nov 2 14:29:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sun Nov 8 17:21:07 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   4 +-
 .../test/queries/clientpositive/explain_ddl.q   |  28 +
 .../results/clientpositive/explain_ddl.q.out    | 604 +++++++++++++++++++
 3 files changed, 634 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c29a685f/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f7e2039..1ca113c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10961,7 +10961,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
               + dbName + "." + tblName);
         }
         Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(dbName);
-        if (tables != null && tables.containsKey(tblName)) {
+        if (tables != null && tables.containsKey(tblName) && !ctx.getExplain())  {
           throw new SemanticException("Temporary table " + dbName + "." + tblName
               + " already exists");
         }
@@ -10970,7 +10970,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         // dumpTable is only used to check the conflict for non-temporary tables
         try {
           Table dumpTable = db.newTable(dbDotTab);
-          if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false)) {
+          if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.getExplain()) {
             throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab));
           }
         } catch (HiveException e) {

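Both hunks above apply the same pattern: the already-exists check is
suppressed when the statement is only being explained. Reduced to a
skeleton (the names here are illustrative; only the ctx.getExplain() guard
comes from the patch itself):

    public class ExplainAwareCheck {
        // A conflict aborts real DDL, but EXPLAIN of the same DDL proceeds
        // so the plan can still be shown.
        static void checkConflict(boolean tableExists, boolean isExplain) {
            if (tableExists && !isExplain) {
                throw new IllegalStateException("Table already exists");
            }
        }
    }
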
http://git-wip-us.apache.org/repos/asf/hive/blob/c29a685f/ql/src/test/queries/clientpositive/explain_ddl.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explain_ddl.q b/ql/src/test/queries/clientpositive/explain_ddl.q
new file mode 100644
index 0000000..e255179
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/explain_ddl.q
@@ -0,0 +1,28 @@
+-- This test exercises EXPLAIN for DDL/DML statements
+
+-- Create some views and tables
+CREATE VIEW V1 AS SELECT key, value from src;
+select count(*) from V1 where key > 0;
+
+CREATE TABLE M1 AS SELECT key, value from src;
+select count(*) from M1 where key > 0;
+
+EXPLAIN CREATE TABLE M1 AS select * from src;
+EXPLAIN CREATE TABLE M1 AS select * from M1;
+EXPLAIN CREATE TABLE M1 AS select * from V1;
+
+EXPLAIN CREATE TABLE V1 AS select * from M1;
+EXPLAIN CREATE VIEW V1 AS select * from M1;
+
+EXPLAIN CREATE TABLE M1 LIKE src;
+EXPLAIN CREATE TABLE M1 LIKE M1;
+
+EXPLAIN DROP TABLE M1;
+select count(*) from M1 where key > 0;
+
+EXPLAIN INSERT INTO M1 SELECT * FROM M1;
+select count(*) from M1 where key > 0;
+
+EXPLAIN TRUNCATE TABLE M1;
+select count(*) from M1 where key > 0;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/c29a685f/ql/src/test/results/clientpositive/explain_ddl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_ddl.q.out b/ql/src/test/results/clientpositive/explain_ddl.q.out
new file mode 100644
index 0000000..3371e7e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/explain_ddl.q.out
@@ -0,0 +1,604 @@
+PREHOOK: query: -- This test exercises EXPLAIN for DDL/DML statements
+
+-- Create some views and tables
+CREATE VIEW V1 AS SELECT key, value from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@V1
+POSTHOOK: query: -- This test exercises EXPLAIN for DDL/DML statements
+
+-- Create some views and tables
+CREATE VIEW V1 AS SELECT key, value from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@V1
+PREHOOK: query: select count(*) from V1 where key > 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from V1 where key > 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+497
+PREHOOK: query: CREATE TABLE M1 AS SELECT key, value from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@M1
+POSTHOOK: query: CREATE TABLE M1 AS SELECT key, value from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@M1
+PREHOOK: query: select count(*) from M1 where key > 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@m1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from M1 where key > 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@m1
+#### A masked pattern was here ####
+497
+PREHOOK: query: EXPLAIN CREATE TABLE M1 AS select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN CREATE TABLE M1 AS select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-8 depends on stages: Stage-0
+  Stage-2 depends on stages: Stage-8
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.M1
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-8
+      Create Table Operator:
+        Create Table
+          columns: key string, value string
+          input format: org.apache.hadoop.mapred.TextInputFormat
+          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          name: default.M1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.M1
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.M1
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: EXPLAIN CREATE TABLE M1 AS select * from M1
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN CREATE TABLE M1 AS select * from M1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-8 depends on stages: Stage-0
+  Stage-2 depends on stages: Stage-8
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: m1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.M1
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-8
+      Create Table Operator:
+        Create Table
+          columns: key string, value string
+          input format: org.apache.hadoop.mapred.TextInputFormat
+          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          name: default.M1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.M1
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.M1
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: EXPLAIN CREATE TABLE M1 AS select * from V1
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN CREATE TABLE M1 AS select * from V1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-8 depends on stages: Stage-0
+  Stage-2 depends on stages: Stage-8
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.M1
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-8
+      Create Table Operator:
+        Create Table
+          columns: key string, value string
+          input format: org.apache.hadoop.mapred.TextInputFormat
+          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          name: default.M1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.M1
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.M1
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: EXPLAIN CREATE TABLE V1 AS select * from M1
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN CREATE TABLE V1 AS select * from M1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-8 depends on stages: Stage-0
+  Stage-2 depends on stages: Stage-8
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: m1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.V1
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-8
+      Create Table Operator:
+        Create Table
+          columns: key string, value string
+          input format: org.apache.hadoop.mapred.TextInputFormat
+          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          name: default.V1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.V1
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.V1
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: EXPLAIN CREATE VIEW V1 AS select * from M1
+PREHOOK: type: CREATEVIEW
+POSTHOOK: query: EXPLAIN CREATE VIEW V1 AS select * from M1
+POSTHOOK: type: CREATEVIEW
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+      Create View Operator:
+        Create View
+          or replace: false
+          columns: key string, value string
+          expanded text: select `m1`.`key`, `m1`.`value` from `default`.`M1`
+          name: default.V1
+          original text: select * from M1
+
+PREHOOK: query: EXPLAIN CREATE TABLE M1 LIKE src
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: EXPLAIN CREATE TABLE M1 LIKE src
+POSTHOOK: type: CREATETABLE
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+      Create Table Operator:
+        Create Table
+          default input format: org.apache.hadoop.mapred.TextInputFormat
+          default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+          default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          like: src
+          name: default.M1
+
+PREHOOK: query: EXPLAIN CREATE TABLE M1 LIKE M1
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: EXPLAIN CREATE TABLE M1 LIKE M1
+POSTHOOK: type: CREATETABLE
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+      Create Table Operator:
+        Create Table
+          default input format: org.apache.hadoop.mapred.TextInputFormat
+          default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+          default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          like: M1
+          name: default.M1
+
+PREHOOK: query: EXPLAIN DROP TABLE M1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: EXPLAIN DROP TABLE M1
+POSTHOOK: type: DROPTABLE
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+      Drop Table Operator:
+        Drop Table
+          table: M1
+
+PREHOOK: query: select count(*) from M1 where key > 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@m1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from M1 where key > 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@m1
+#### A masked pattern was here ####
+497
+PREHOOK: query: EXPLAIN INSERT INTO M1 SELECT * FROM M1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO M1 SELECT * FROM M1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: m1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.m1
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.m1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.m1
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.m1
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(*) from M1 where key > 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@m1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from M1 where key > 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@m1
+#### A masked pattern was here ####
+497
+PREHOOK: query: EXPLAIN TRUNCATE TABLE M1
+PREHOOK: type: TRUNCATETABLE
+POSTHOOK: query: EXPLAIN TRUNCATE TABLE M1
+POSTHOOK: type: TRUNCATETABLE
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+      Truncate Table Operator:
+        Truncate Table or Partition
+          TableName: M1
+
+PREHOOK: query: select count(*) from M1 where key > 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@m1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from M1 where key > 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@m1
+#### A masked pattern was here ####
+497


[39/55] [abbrv] hive git commit: HIVE-12364 : Distcp job fails when run under Tez (Prasanth J via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-12364 : Distcp job fails when run under Tez (Prasanth J via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/16521c40
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/16521c40
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/16521c40

Branch: refs/heads/spark
Commit: 16521c40055afa86a242dad1a5ce708d2aa9b631
Parents: d5a69ec
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Mon Nov 9 17:59:37 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Nov 9 17:59:37 2015 -0800

----------------------------------------------------------------------
 itests/qtest/pom.xml                                  |  6 ++++++
 .../src/test/resources/testconfiguration.properties   |  1 +
 .../test/queries/clientpositive/insert_dir_distcp.q   |  9 +++++++++
 .../results/clientpositive/insert_dir_distcp.q.out    | 14 ++++++++++++++
 .../clientpositive/tez/insert_dir_distcp.q.out        | 14 ++++++++++++++
 .../org/apache/hadoop/hive/shims/Hadoop23Shims.java   |  3 +++
 6 files changed, 47 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/16521c40/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index 65c3c75..cfa49ba 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -145,6 +145,12 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <version>${hadoop.version}</version>
       <classifier>tests</classifier>

http://git-wip-us.apache.org/repos/asf/hive/blob/16521c40/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index d16c318..70f96da 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -26,6 +26,7 @@ minimr.query.files=auto_sortmerge_join_16.q,\
   infer_bucket_sort_num_buckets.q,\
   infer_bucket_sort_reducers_power_two.q,\
   input16_cc.q,\
+  insert_dir_distcp.q,\
   join1.q,\
   leftsemijoin_mr.q,\
   list_bucket_dml_10.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/16521c40/ql/src/test/queries/clientpositive/insert_dir_distcp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_dir_distcp.q b/ql/src/test/queries/clientpositive/insert_dir_distcp.q
new file mode 100644
index 0000000..6582938
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_dir_distcp.q
@@ -0,0 +1,9 @@
+set hive.exec.copyfile.maxsize=400;
+
+set tez.am.log.level=INFO;
+set tez.task.log.level=INFO;
+-- see TEZ-2931 for using INFO logging
+
+insert overwrite directory '/tmp/src' select * from src;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/src/;

http://git-wip-us.apache.org/repos/asf/hive/blob/16521c40/ql/src/test/results/clientpositive/insert_dir_distcp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_dir_distcp.q.out b/ql/src/test/results/clientpositive/insert_dir_distcp.q.out
new file mode 100644
index 0000000..b70fa01
--- /dev/null
+++ b/ql/src/test/results/clientpositive/insert_dir_distcp.q.out
@@ -0,0 +1,14 @@
+PREHOOK: query: -- see TEZ-2931 for using INFO logging
+
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- see TEZ-2931 for using INFO logging
+
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/16521c40/ql/src/test/results/clientpositive/tez/insert_dir_distcp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/insert_dir_distcp.q.out b/ql/src/test/results/clientpositive/tez/insert_dir_distcp.q.out
new file mode 100644
index 0000000..b70fa01
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/insert_dir_distcp.q.out
@@ -0,0 +1,14 @@
+PREHOOK: query: -- see TEZ-2931 for using INFO logging
+
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- see TEZ-2931 for using INFO logging
+
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/16521c40/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 36282a5..4da98e4 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -1300,11 +1300,14 @@ public class Hadoop23Shims extends HadoopShimsSecure {
     options.setSkipCRC(true);
     options.preserve(FileAttribute.BLOCKSIZE);
     try {
+      conf.setBoolean("mapred.mapper.new-api", true);
       DistCp distcp = new DistCp(conf, options);
       distcp.execute();
       return true;
     } catch (Exception e) {
       throw new IOException("Cannot execute DistCp process: " + e, e);
+    } finally {
+      conf.setBoolean("mapred.mapper.new-api", false);
     }
   }
 

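A note on the one-line shim fix above, hedged because the commit message does
not spell out the mechanism: DistCp's mapper is written against the new
org.apache.hadoop.mapreduce API, and the likely failure is that a
configuration inherited in a Tez environment carries mapred.mapper.new-api=false,
so the launched DistCp job binds the wrong mapper API. The new test forces
the DistCp path by dropping hive.exec.copyfile.maxsize to 400 bytes. Below is
a sketch of the same pattern that restores the caller's original flag instead
of hard-coding it back to false; the restore-previous twist is mine, not the
committed code:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.tools.DistCpOptions;

    public final class DistCpRunner {
      public static boolean run(Configuration conf, DistCpOptions options) throws IOException {
        // Remember the caller's setting instead of assuming it was false.
        boolean previous = conf.getBoolean("mapred.mapper.new-api", false);
        conf.setBoolean("mapred.mapper.new-api", true); // DistCp expects the new-API mapper
        try {
          new DistCp(conf, options).execute();
          return true;
        } catch (Exception e) {
          throw new IOException("Cannot execute DistCp process: " + e, e);
        } finally {
          conf.setBoolean("mapred.mapper.new-api", previous); // restore, don't clobber
        }
      }
    }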

[37/55] [abbrv] hive git commit: HIVE-12354 : MapJoin with double keys is slow on MR (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by xu...@apache.org.
HIVE-12354 : MapJoin with double keys is slow on MR (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/41b60c44
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/41b60c44
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/41b60c44

Branch: refs/heads/spark
Commit: 41b60c44401d92787227b5cdf2a51c20d28a2bc4
Parents: 08e9d26
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Nov 9 16:32:31 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Nov 9 16:32:31 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/KeyWrapperFactory.java  | 20 +--------------
 .../ql/exec/persistence/MapJoinKeyObject.java   |  6 ++---
 .../objectinspector/ObjectInspectorUtils.java   | 26 ++++++++++++++++++++
 3 files changed, 29 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/41b60c44/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
index 1c409a2..5154a5f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
@@ -105,25 +105,7 @@ public class KeyWrapperFactory {
 
     @Override
     public void setHashKey() {
-      if (keys == null) {
-        hashcode = 0;
-      } else {
-        hashcode = 1;
-        for (Object element : keys) {
-          hashcode = 31 * hashcode;
-          if(element != null) {
-            if(element instanceof LazyDouble) {
-              long v = Double.doubleToLongBits(((LazyDouble)element).getWritableObject().get());
-              hashcode = hashcode + (int) (v ^ (v >>> 32));
-            } else if (element instanceof DoubleWritable){
-              long v = Double.doubleToLongBits(((DoubleWritable)element).get());
-              hashcode = hashcode + (int) (v ^ (v >>> 32));
-            } else {
-              hashcode = hashcode + element.hashCode();
-            }
-          }
-        }
-      }
+      hashcode = ObjectInspectorUtils.writableArrayHashCode(keys);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/41b60c44/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java
index e1fd6d3..7592f9e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyObject.java
@@ -78,11 +78,9 @@ public class MapJoinKeyObject extends MapJoinKey {
 
   @Override
   public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + Arrays.hashCode(key);
-    return result;
+    return ObjectInspectorUtils.writableArrayHashCode(key);
   }
+
   @Override
   public boolean equals(Object obj) {
     if (this == obj)

http://git-wip-us.apache.org/repos/asf/hive/blob/41b60c44/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
index 56597a2..7a13eb0 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.serde2.io.HiveIntervalDayTimeWritable;
 import org.apache.hadoop.hive.serde2.io.HiveIntervalYearMonthWritable;
 import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.lazy.LazyDouble;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.AbstractPrimitiveWritableObjectInspector;
@@ -77,6 +78,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectIn
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.DoubleWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.StringUtils;
 
@@ -104,6 +106,30 @@ public final class ObjectInspectorUtils {
   }
 
   /**
+   * Calculates the hash code for an array of Objects that contains writables. This is used
+   * to work around the buggy Hadoop DoubleWritable hashCode implementation. It should
+   * only be used for process-local hash codes; do not use it where hash codes are persisted, as in bucketing.
+   */
+  public static int writableArrayHashCode(Object[] keys) {
+    if (keys == null) return 0;
+    int hashcode = 1;
+    for (Object element : keys) {
+      hashcode = 31 * hashcode;
+      if (element == null) continue;
+      if (element instanceof LazyDouble) {
+        long v = Double.doubleToLongBits(((LazyDouble)element).getWritableObject().get());
+        hashcode = hashcode + (int) (v ^ (v >>> 32));
+      } else if (element instanceof DoubleWritable){
+        long v = Double.doubleToLongBits(((DoubleWritable)element).get());
+        hashcode = hashcode + (int) (v ^ (v >>> 32));
+      } else {
+        hashcode = hashcode + element.hashCode();
+      }
+    }
+    return hashcode;
+  }
+
+  /**
    * Ensures that an ObjectInspector is Writable.
    */
   public static ObjectInspector getWritableObjectInspector(ObjectInspector oi) {

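To see why the shared helper above special-cases doubles, here is a small
self-contained demo (my illustration, not part of the patch). Hadoop's
DoubleWritable.hashCode() historically returned (int) Double.doubleToLongBits(get()),
i.e. only the low 32 bits of the bit pattern. Whole-number doubles have
all-zero low bits, so every such key hashes to 0, the MapJoin hash table
collapses into a single bucket, and probes degrade to linear scans; that is
the slowdown this commit fixes.

    public final class DoubleHashDemo {
      public static void main(String[] args) {
        for (double d : new double[] {1.0, 2.0, 3.0, 1024.0}) {
          long bits = Double.doubleToLongBits(d);
          int truncated = (int) bits;                // old DoubleWritable-style hash
          int folded = (int) (bits ^ (bits >>> 32)); // fold used by writableArrayHashCode
          System.out.printf("%7.1f  truncated=%d  folded=%d%n", d, truncated, folded);
        }
        // truncated is 0 for every value above; folded keeps the high bits
        // in play, so hash buckets stay balanced.
      }
    }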

[52/55] [abbrv] hive git commit: HIVE-12186 : Upgrade Hive to Calcite 1.5 (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-12186 : Upgrade Hive to Calcite 1.5 (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/da4b1b07
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/da4b1b07
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/da4b1b07

Branch: refs/heads/spark
Commit: da4b1b07764718210377814edf06d5960d074f79
Parents: 1d5da09
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed Nov 11 11:31:22 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Nov 11 11:31:22 2015 -0800

----------------------------------------------------------------------
 pom.xml                                         |   2 +-
 .../calcite/reloperators/HiveTableScan.java     |   6 +-
 .../calcite/rules/HiveRelFieldTrimmer.java      | 143 ++-----------------
 .../calcite/translator/ASTConverter.java        |  34 ++---
 .../calcite/translator/HiveOpConverter.java     |   7 +-
 .../translator/PlanModifierForASTConv.java      |   3 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   8 +-
 .../bucketizedhiveinputformat.q.out             |   2 +
 .../spark/bucketizedhiveinputformat.q.out       |   2 +
 9 files changed, 38 insertions(+), 169 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 282d077..99aeff7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -105,7 +105,7 @@
     <antlr.version>3.4</antlr.version>
     <avro.version>1.7.7</avro.version>
     <bonecp.version>0.8.0.RELEASE</bonecp.version>
-    <calcite.version>1.4.0-incubating</calcite.version>
+    <calcite.version>1.5.0</calcite.version>
     <datanucleus-api-jdo.version>3.2.6</datanucleus-api-jdo.version>
     <datanucleus-core.version>3.2.10</datanucleus-core.version>
     <datanucleus-rdbms.version>3.2.9</datanucleus-rdbms.version>

http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
index 1831d69..446dc73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
@@ -29,13 +29,13 @@ import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelWriter;
-import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.core.TableScan;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.tools.RelBuilder;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
@@ -152,7 +152,7 @@ public class HiveTableScan extends TableScan implements HiveRelNode {
 
   @Override
   public RelNode project(ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields,
-      RelFactories.ProjectFactory projectFactory) {
+      RelBuilder relBuilder) {
 
     // 1. If the schema is the same then bail out
     final int fieldCount = getRowType().getFieldCount();
@@ -183,7 +183,7 @@ public class HiveTableScan extends TableScan implements HiveRelNode {
         fieldNames));
 
     // 5. Add Proj on top of TS
-    return projectFactory.createProject(newHT, exprList, new ArrayList<String>(fieldNames));
+    return relBuilder.push(newHT).project(exprList, new ArrayList<String>(fieldNames)).build();
   }
 
   public List<Integer> getNeededColIndxsFrmReloptHT() {

http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
index a12fa2a..b543fbb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
@@ -23,10 +23,9 @@ import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Aggregate;
-import org.apache.calcite.rel.core.AggregateCall;
 import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
@@ -35,22 +34,23 @@ import org.apache.calcite.rex.RexPermuteInputsShuttle;
 import org.apache.calcite.rex.RexVisitor;
 import org.apache.calcite.sql.validate.SqlValidator;
 import org.apache.calcite.sql2rel.RelFieldTrimmer;
+import org.apache.calcite.tools.RelBuilder;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.mapping.IntPair;
 import org.apache.calcite.util.mapping.Mapping;
 import org.apache.calcite.util.mapping.MappingType;
 import org.apache.calcite.util.mapping.Mappings;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin;
 
-import com.google.common.base.Function;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
 public class HiveRelFieldTrimmer extends RelFieldTrimmer {
 
-  private final RelFactories.AggregateFactory aggregateFactory;
+  protected static final Log LOG = LogFactory.getLog(HiveRelFieldTrimmer.class);
+
 
   public HiveRelFieldTrimmer(SqlValidator validator,
+      RelOptCluster cluster,
       RelFactories.ProjectFactory projectFactory,
       RelFactories.FilterFactory filterFactory,
       RelFactories.JoinFactory joinFactory,
@@ -58,9 +58,10 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
       RelFactories.SortFactory sortFactory,
       RelFactories.AggregateFactory aggregateFactory,
       RelFactories.SetOpFactory setOpFactory) {
-    super(validator, projectFactory, filterFactory, joinFactory,
-            semiJoinFactory, sortFactory, aggregateFactory, setOpFactory);
-    this.aggregateFactory = aggregateFactory;
+    super(validator,
+            RelBuilder.proto(projectFactory, filterFactory, joinFactory,
+                semiJoinFactory, sortFactory, aggregateFactory, setOpFactory)
+            .create(cluster, null));
   }
 
   /**
@@ -155,127 +156,5 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
 
     return new TrimResult(newJoin, mapping);
   }
-  /**
-   * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
-   * {@link org.apache.calcite.rel.logical.LogicalAggregate}.
-   */
-  @Override
-  public TrimResult trimFields(
-      Aggregate aggregate,
-      ImmutableBitSet fieldsUsed,
-      Set<RelDataTypeField> extraFields) {
-    // Fields:
-    //
-    // | sys fields | group fields | indicator fields | agg functions |
-    //
-    // Two kinds of trimming:
-    //
-    // 1. If agg rel has system fields but none of these are used, create an
-    // agg rel with no system fields.
-    //
-    // 2. If aggregate functions are not used, remove them.
-    //
-    // But group and indicator fields stay, even if they are not used.
-
-    final RelDataType rowType = aggregate.getRowType();
-
-    // Compute which input fields are used.
-    // 1. group fields are always used
-    final ImmutableBitSet.Builder inputFieldsUsed =
-        ImmutableBitSet.builder(aggregate.getGroupSet());
-    // 2. agg functions
-    for (AggregateCall aggCall : aggregate.getAggCallList()) {
-      for (int i : aggCall.getArgList()) {
-        inputFieldsUsed.set(i);
-      }
-      if (aggCall.filterArg >= 0) {
-        inputFieldsUsed.set(aggCall.filterArg);
-      }
-    }
-
-    // Create input with trimmed columns.
-    final RelNode input = aggregate.getInput();
-    final Set<RelDataTypeField> inputExtraFields = Collections.emptySet();
-    final TrimResult trimResult =
-        trimChild(aggregate, input, inputFieldsUsed.build(), inputExtraFields);
-    final RelNode newInput = trimResult.left;
-    final Mapping inputMapping = trimResult.right;
-
-    // We have to return group keys and (if present) indicators.
-    // So, pretend that the consumer asked for them.
-    final int groupCount = aggregate.getGroupSet().cardinality();
-    final int indicatorCount = aggregate.getIndicatorCount();
-    fieldsUsed =
-        fieldsUsed.union(ImmutableBitSet.range(groupCount + indicatorCount));
-
-    // If the input is unchanged, and we need to project all columns,
-    // there's nothing to do.
-    if (input == newInput
-        && fieldsUsed.equals(ImmutableBitSet.range(rowType.getFieldCount()))) {
-      return new TrimResult(
-          aggregate,
-          Mappings.createIdentity(rowType.getFieldCount()));
-    }
-
-    // Which agg calls are used by our consumer?
-    int j = groupCount + indicatorCount;
-    int usedAggCallCount = 0;
-    for (int i = 0; i < aggregate.getAggCallList().size(); i++) {
-      if (fieldsUsed.get(j++)) {
-        ++usedAggCallCount;
-      }
-    }
-
-    // Offset due to the number of system fields having changed.
-    Mapping mapping =
-        Mappings.create(
-            MappingType.INVERSE_SURJECTION,
-            rowType.getFieldCount(),
-            groupCount + indicatorCount + usedAggCallCount);
-
-    final ImmutableBitSet newGroupSet =
-        Mappings.apply(inputMapping, aggregate.getGroupSet());
-
-    final ImmutableList<ImmutableBitSet> newGroupSets =
-        ImmutableList.copyOf(
-            Iterables.transform(aggregate.getGroupSets(),
-                new Function<ImmutableBitSet, ImmutableBitSet>() {
-                  @Override
-                  public ImmutableBitSet apply(ImmutableBitSet input) {
-                    return Mappings.apply(inputMapping, input);
-                  }
-                }));
-
-    // Populate mapping of where to find the fields. System, group key and
-    // indicator fields first.
-    for (j = 0; j < groupCount + indicatorCount; j++) {
-      mapping.set(j, j);
-    }
-
-    // Now create new agg calls, and populate mapping for them.
-    final List<AggregateCall> newAggCallList = new ArrayList<>();
-    j = groupCount + indicatorCount;
-    for (AggregateCall aggCall : aggregate.getAggCallList()) {
-      if (fieldsUsed.get(j)) {
-        AggregateCall newAggCall =
-            aggCall.copy(Mappings.apply2(inputMapping, aggCall.getArgList()),
-                Mappings.apply(inputMapping, aggCall.filterArg));
-        if (newAggCall.equals(aggCall)) {
-          newAggCall = aggCall; // immutable -> canonize to save space
-        }
-        mapping.set(j, groupCount + indicatorCount + newAggCallList.size());
-        newAggCallList.add(newAggCall);
-      }
-      ++j;
-    }
-
-    RelNode newAggregate = aggregateFactory.createAggregate(newInput,
-        aggregate.indicator, newGroupSet, newGroupSets, newAggCallList);
-
-    assert newAggregate.getClass() == aggregate.getClass();
-
-    return new TrimResult(newAggregate, mapping);
-  }
-
 
 }
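
For readers tracking the API change in the constructor above: Calcite 1.5
replaced RelFieldTrimmer's seven-factory constructor with one that takes a
RelBuilder, so the Hive subclass now bundles its factories into a
RelBuilderFactory. A minimal sketch of the pattern, assuming the Calcite 1.5
signatures RelBuilder.proto(Object...) and RelBuilderFactory.create(cluster,
schema), shown with Calcite's default factories for self-containedness:

    import org.apache.calcite.plan.RelOptCluster;
    import org.apache.calcite.rel.core.RelFactories;
    import org.apache.calcite.tools.RelBuilder;
    import org.apache.calcite.tools.RelBuilderFactory;

    final class TrimmerBuilderSketch {
      // Bundle factories into a prototype, then materialize a RelBuilder for
      // the cluster; a null schema is fine when no table names are resolved.
      static RelBuilder builderFor(RelOptCluster cluster) {
        RelBuilderFactory proto = RelBuilder.proto(
            RelFactories.DEFAULT_PROJECT_FACTORY,
            RelFactories.DEFAULT_FILTER_FACTORY,
            RelFactories.DEFAULT_JOIN_FACTORY,
            RelFactories.DEFAULT_SEMI_JOIN_FACTORY,
            RelFactories.DEFAULT_SORT_FACTORY,
            RelFactories.DEFAULT_AGGREGATE_FACTORY,
            RelFactories.DEFAULT_SET_OP_FACTORY);
        return proto.create(cluster, null);
      }
    }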

http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index e4ac154..d026e58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -77,8 +77,7 @@ public class ASTConverter {
   private Aggregate        groupBy;
   private Filter           having;
   private Project          select;
-  private Sort             order;
-  private Sort             limit;
+  private Sort             orderLimit;
 
   private Schema           schema;
 
@@ -203,27 +202,14 @@ public class ASTConverter {
      * parent hence we need to go top down; but OB at each block really belong
      * to its src/from. Hence the need to pass in sort for each block from
      * its parent.
+     * 8. Limit
      */
-    convertOBToASTNode((HiveSortLimit) order);
-
-    // 8. Limit
-    convertLimitToASTNode((HiveSortLimit) limit);
+    convertOrderLimitToASTNode((HiveSortLimit) orderLimit);
 
     return hiveAST.getAST();
   }
 
-  private void convertLimitToASTNode(HiveSortLimit limit) {
-    if (limit != null) {
-      HiveSortLimit hiveLimit = limit;
-      RexNode limitExpr = hiveLimit.getFetchExpr();
-      if (limitExpr != null) {
-        Object val = ((RexLiteral) limitExpr).getValue2();
-        hiveAST.limit = ASTBuilder.limit(val);
-      }
-    }
-  }
-
-  private void convertOBToASTNode(HiveSortLimit order) {
+  private void convertOrderLimitToASTNode(HiveSortLimit order) {
     if (order != null) {
       HiveSortLimit hiveSortLimit = order;
       if (!hiveSortLimit.getCollation().getFieldCollations().isEmpty()) {
@@ -264,6 +250,12 @@ public class ASTConverter {
         }
         hiveAST.order = orderAst;
       }
+
+      RexNode limitExpr = hiveSortLimit.getFetchExpr();
+      if (limitExpr != null) {
+        Object val = ((RexLiteral) limitExpr).getValue2();
+        hiveAST.limit = ASTBuilder.limit(val);
+      }
     }
   }
 
@@ -366,11 +358,7 @@ public class ASTConverter {
         if (ASTConverter.this.select != null) {
           ASTConverter.this.from = node;
         } else {
-          Sort hiveSortRel = (Sort) node;
-          if (hiveSortRel.getCollation().getFieldCollations().isEmpty())
-            ASTConverter.this.limit = hiveSortRel;
-          else
-            ASTConverter.this.order = hiveSortRel;
+          ASTConverter.this.orderLimit = (Sort) node;
         }
       }
       /*

http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index f0f8aa8..130ee89 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -487,7 +487,7 @@ public class HiveOpConverter {
       if (this.semanticAnalyzer != null && semanticAnalyzer.getQB() != null
           && semanticAnalyzer.getQB().getParseInfo() != null)
         this.semanticAnalyzer.getQB().getParseInfo().setOuterQueryLimit(limit);
-      ArrayList<ColumnInfo> cinfoLst = createColInfos(inputOp);
+      ArrayList<ColumnInfo> cinfoLst = createColInfos(resultOp);
       resultOp = OperatorFactory.getAndMakeChild(limitDesc,
           new RowSchema(cinfoLst), resultOp);
 
@@ -1059,10 +1059,6 @@ public class HiveOpConverter {
   }
 
   private static JoinType extractJoinType(HiveJoin join) {
-    // UNIQUE
-    if (join.isDistinct()) {
-      return JoinType.UNIQUE;
-    }
     // SEMIJOIN
     if (join.isLeftSemiJoin()) {
       return JoinType.LEFTSEMI;
@@ -1080,6 +1076,7 @@ public class HiveOpConverter {
       resultJoinType = JoinType.RIGHTOUTER;
       break;
     default:
+      // TODO: UNIQUE JOIN
       resultJoinType = JoinType.INNER;
       break;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
index e820496..29e08f8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
@@ -312,7 +312,8 @@ public class PlanModifierForASTConv {
     boolean validChild = true;
     RelNode child = sortNode.getInput();
 
-    if (!(HiveCalciteUtil.limitRelNode(sortNode) && HiveCalciteUtil.orderRelNode(child))
+    if (!(HiveCalciteUtil.limitRelNode(sortNode) && HiveCalciteUtil.orderRelNode(child)
+            && HiveCalciteUtil.limitRelNode(child))
         && !(child instanceof Project)) {
       validChild = false;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index e13356c..de67b54 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -58,8 +58,6 @@ import org.apache.calcite.rel.core.AggregateCall;
 import org.apache.calcite.rel.core.Filter;
 import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.Sort;
 import org.apache.calcite.rel.metadata.CachingRelMetadataProvider;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
@@ -902,7 +900,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
                 HiveJoinToMultiJoinRule.INSTANCE, HiveProjectMergeRule.INSTANCE);
         // The previous rules can pull up projections through join operators,
         // thus we run the field trimmer again to push them back down
-        HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
+        HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null,
+            cluster, HiveProject.DEFAULT_PROJECT_FACTORY,
             HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY,
             HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSortLimit.HIVE_SORT_REL_FACTORY,
             HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
@@ -983,7 +982,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
           new HivePartitionPruneRule(conf));
 
       // 5. Projection Pruning
-      HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
+      HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null,
+          cluster, HiveProject.DEFAULT_PROJECT_FACTORY,
           HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY,
           HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSortLimit.HIVE_SORT_REL_FACTORY,
           HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);

http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out b/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
index 277b0f7..cfb95be 100644
--- a/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
+++ b/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
@@ -22,6 +22,8 @@ POSTHOOK: query: CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM (
 SELECT tmp1.name as name FROM (
   SELECT name, 'MMM' AS n FROM T1) tmp1 

http://git-wip-us.apache.org/repos/asf/hive/blob/da4b1b07/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out b/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out
index 98c5802..c8fc4d3 100644
--- a/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out
@@ -22,6 +22,8 @@ POSTHOOK: query: CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM (
 SELECT tmp1.name as name FROM (
   SELECT name, 'MMM' AS n FROM T1) tmp1 

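One more consequence of the 1.5 upgrade, visible in the ASTConverter hunk
earlier in this commit: ORDER BY and LIMIT now reach the converter as a
single Sort node carrying both a collation and a fetch expression, which is
why the separate order and limit fields were merged into orderLimit. A
hedged RelBuilder sketch of that single-node shape (table name and field
index are illustrative, and whether the builder folds the limit into the
preceding sort can vary by version):

    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.tools.RelBuilder;

    final class SortLimitSketch {
      // ORDER BY col0 DESC LIMIT 10: collation and fetch on one operator.
      static RelNode orderByThenLimit(RelBuilder b) {
        return b.scan("src")
            .sort(b.desc(b.field(0)))
            .limit(0, 10) // offset 0, fetch 10
            .build();
      }
    }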

[34/55] [abbrv] hive git commit: HIVE-12358: Categorize vectorization benchmarks into arithmetic, comparison, logic (Teddy Choi, reviewed by Ashutosh Chauhan, Ferdinand Xu)

Posted by xu...@apache.org.
HIVE-12358: Categorize vectorization benchmarks into arithmetic, comparison, logic (Teddy Choi, reviewed by Ashutosh Chauhan, Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9234712b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9234712b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9234712b

Branch: refs/heads/spark
Commit: 9234712b40d1cc5bb08159f0fa899346dd9ab73f
Parents: 1efb92a
Author: Teddy Choi <tc...@hortonworks.com>
Authored: Sun Nov 8 20:08:07 2015 -0500
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Sun Nov 8 20:09:31 2015 -0500

----------------------------------------------------------------------
 .../vectorization/AbstractExpression.java       | 150 ++++++
 .../vectorization/VectorizationBench.java       | 506 -------------------
 .../VectorizedArithmeticBench.java              | 112 ++++
 .../VectorizedComparisonBench.java              | 215 ++++++++
 .../vectorization/VectorizedLogicBench.java     | 147 ++++++
 5 files changed, 624 insertions(+), 506 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9234712b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/AbstractExpression.java
----------------------------------------------------------------------
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/AbstractExpression.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/AbstractExpression.java
new file mode 100644
index 0000000..94af3e0
--- /dev/null
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/AbstractExpression.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.benchmark.vectorization;
+
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+@BenchmarkMode(Mode.AverageTime)
+@Fork(1)
+@State(Scope.Thread)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public abstract class AbstractExpression {
+  private static final int DEFAULT_ITER_TIME = 1000000;
+  protected VectorExpression expression;
+  protected VectorizedRowBatch rowBatch;
+
+  protected VectorizedRowBatch buildRowBatch(ColumnVector output, int colNum, ColumnVector...
+    cols) {
+    VectorizedRowBatch rowBatch = new VectorizedRowBatch(colNum + 1);
+    for (int i = 0; i < cols.length; i++) {
+      rowBatch.cols[i] = cols[i];
+    }
+    rowBatch.cols[colNum] = output;
+    return rowBatch;
+  }
+
+  @Setup
+  public abstract void setup();
+
+  @Benchmark
+  @Warmup(iterations = 2, time = 2, timeUnit = TimeUnit.MILLISECONDS)
+  @Measurement(iterations = 2, time = 2, timeUnit = TimeUnit.MILLISECONDS)
+  public void bench() {
+    for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+      expression.evaluate(rowBatch);
+    }
+  }
+
+  protected LongColumnVector getLongColumnVector() {
+    LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    Random random = new Random();
+    for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
+      columnVector.vector[i] = random.nextLong();
+    }
+    return columnVector;
+  }
+
+  protected LongColumnVector getRepeatingLongColumnVector() {
+    LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    columnVector.fill(2);
+    return columnVector;
+  }
+
+  protected LongColumnVector getLongColumnVectorWithNull() {
+    LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    columnVector.noNulls = false;
+    Random random = new Random();
+    for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
+      if (i % 100 == 0) {
+        columnVector.isNull[i] = true;
+      }
+      columnVector.vector[i] = random.nextLong();
+    }
+    return columnVector;
+  }
+
+  protected LongColumnVector getBooleanLongColumnVector() {
+    LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    Random random = new Random();
+    for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
+      columnVector.vector[i] = random.nextInt(2);
+    }
+    return columnVector;
+  }
+
+  protected LongColumnVector getBooleanRepeatingLongColumnVector() {
+    LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    columnVector.fill(1);
+    return columnVector;
+  }
+
+  protected LongColumnVector getBooleanLongColumnVectorWithNull() {
+    LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    columnVector.noNulls = false;
+    Random random = new Random();
+    for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
+      if (i % 100 == 0) {
+        columnVector.isNull[i] = true;
+      }
+      columnVector.vector[i] = random.nextInt(2);
+    }
+    return columnVector;
+  }
+
+  protected DoubleColumnVector getDoubleColumnVector() {
+    DoubleColumnVector columnVector = new DoubleColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    Random random = new Random();
+    for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
+      columnVector.vector[i] = random.nextDouble();
+    }
+    return columnVector;
+  }
+
+  protected DoubleColumnVector getRepeatingDoubleColumnVector() {
+    DoubleColumnVector columnVector = new DoubleColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    columnVector.fill(2.0d);
+    return columnVector;
+  }
+
+  protected DoubleColumnVector getDoubleColumnVectorWithNull() {
+    DoubleColumnVector columnVector = new DoubleColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+    columnVector.noNulls = false;
+    Random random = new Random();
+    for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
+      if (i % 100 == 0) {
+        columnVector.isNull[i] = true;
+      }
+      columnVector.vector[i] = random.nextDouble();
+    }
+    return columnVector;
+  }
+
+}
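
For orientation, a concrete benchmark built on this base class only needs a setup() override.
A minimal sketch (the class name here is illustrative; the real subclasses appear in the new
bench files below):

    package org.apache.hive.benchmark.vectorization;

    import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
    import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.LongColAddLongColumn;

    public class ExampleLongColAddLongColumnBench extends AbstractExpression {
      @Override
      public void setup() {
        // Inputs occupy columns 0 and 1; the output column sits at index 2.
        rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(),
            getLongColumnVector());
        expression = new LongColAddLongColumn(0, 1, 2);
      }
    }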

http://git-wip-us.apache.org/repos/asf/hive/blob/9234712b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizationBench.java
----------------------------------------------------------------------
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizationBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizationBench.java
deleted file mode 100644
index 642c5e1..0000000
--- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizationBench.java
+++ /dev/null
@@ -1,506 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.benchmark.vectorization;
-
-import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.IfExprLongColumnLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.IfExprDoubleColumnDoubleColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.ColAndCol;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.ColOrCol;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColDivideLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.NotCol;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColEqualLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColGreaterEqualLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColGreaterLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColLessEqualLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColLessLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColNotEqualLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColEqualLongScalar;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColGreaterEqualLongScalar;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColGreaterLongScalar;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColLessEqualLongScalar;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColLessLongScalar;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColNotEqualLongScalar;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarEqualLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarGreaterEqualLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarGreaterLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarLessEqualLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarLessLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarNotEqualLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColDivideLongColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.DoubleColAddDoubleColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.DoubleColDivideDoubleColumn;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.LongColAddLongColumn;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.Warmup;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-
-import java.util.Arrays;
-import java.lang.Override;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-@State(Scope.Benchmark)
-public class VectorizationBench {
-  /**
-   * This test measures the performance for vectorization.
-   * <p/>
-   * This test uses JMH framework for benchmarking.
-   * You may execute this benchmark tool using JMH command line in different ways:
-   * <p/>
-   * To use the settings shown in the main() function, use:
-   * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizationBench
-   * <p/>
-   * To use the default settings used by JMH, use:
-   * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization VectorizationBench
-   * <p/>
-   * To specify different parameters, use:
-   * - This command will use 10 warm-up iterations, 5 test iterations, and 2 forks. And it will
-   * display the Average Time (avgt) in Microseconds (us)
-   * - Benchmark mode. Available modes are:
-   * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
-   * - Output time unit. Available time units are: [m, s, ms, us, ns].
-   * <p/>
-   * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization VectorizationBench
-   * -wi 10 -i 5 -f 2 -bm avgt -tu us
-   */
-
-  @BenchmarkMode(Mode.AverageTime)
-  @Fork(1)
-  @State(Scope.Thread)
-  @OutputTimeUnit(TimeUnit.NANOSECONDS)
-  public static abstract class AbstractExpression {
-    private static final int DEFAULT_ITER_TIME = 1000000;
-    protected VectorExpression expression;
-    protected VectorizedRowBatch rowBatch;
-
-    protected VectorizedRowBatch buildRowBatch(ColumnVector output, int colNum, ColumnVector...
-      cols) {
-      VectorizedRowBatch rowBatch = new VectorizedRowBatch(colNum + 1);
-      for (int i = 0; i < cols.length; i++) {
-        rowBatch.cols[i] = cols[i];
-      }
-      rowBatch.cols[colNum] = output;
-//      rowBatch.selectedInUse = true;
-      return rowBatch;
-    }
-
-    @Setup
-    public abstract void setup();
-
-    @Benchmark
-    @Warmup(iterations = 2, time = 2, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = 2, time = 2, timeUnit = TimeUnit.MILLISECONDS)
-    public void bench() {
-      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
-        expression.evaluate(rowBatch);
-      }
-    }
-
-    protected LongColumnVector getLongColumnVector() {
-      LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      Random random = new Random();
-      for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
-        columnVector.vector[i] = random.nextLong();
-      }
-      return columnVector;
-    }
-
-    protected LongColumnVector getRepeatingLongColumnVector() {
-      LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      columnVector.fill(2);
-      return columnVector;
-    }
-
-    protected LongColumnVector getLongColumnVectorWithNull() {
-      LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      columnVector.noNulls = false;
-      Random random = new Random();
-      for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
-        if (i % 100 == 0) {
-          columnVector.isNull[i] = true;
-        }
-        columnVector.vector[i] = random.nextLong();
-      }
-      return columnVector;
-    }
-
-    protected LongColumnVector getBooleanLongColumnVector() {
-      LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      Random random = new Random();
-      for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
-        columnVector.vector[i] = random.nextInt(2);
-      }
-      return columnVector;
-    }
-
-    protected LongColumnVector getBooleanRepeatingLongColumnVector() {
-      LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      columnVector.fill(1);
-      return columnVector;
-    }
-
-    protected LongColumnVector getBooleanLongColumnVectorWithNull() {
-      LongColumnVector columnVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      columnVector.noNulls = false;
-      Random random = new Random();
-      for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
-        if (i % 100 == 0) {
-          columnVector.isNull[i] = true;
-        }
-        columnVector.vector[i] = random.nextInt(2);
-      }
-      return columnVector;
-    }
-
-    protected DoubleColumnVector getDoubleColumnVector() {
-      DoubleColumnVector columnVector = new DoubleColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      Random random = new Random();
-      for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
-        columnVector.vector[i] = random.nextDouble();
-      }
-      return columnVector;
-    }
-
-    protected DoubleColumnVector getRepeatingDoubleColumnVector() {
-      DoubleColumnVector columnVector = new DoubleColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      columnVector.fill(2.0d);
-      return columnVector;
-    }
-
-    protected DoubleColumnVector getDoubleColumnVectorWithNull() {
-      DoubleColumnVector columnVector = new DoubleColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      columnVector.noNulls = false;
-      Random random = new Random();
-      for (int i = 0; i != VectorizedRowBatch.DEFAULT_SIZE; i++) {
-        if (i % 100 == 0) {
-          columnVector.isNull[i] = true;
-        }
-        columnVector.vector[i] = random.nextDouble();
-      }
-      return columnVector;
-    }
-
-  }
-
-  public static class DoubleColAddRepeatingDoubleColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getDoubleColumnVector(),
-        getRepeatingDoubleColumnVector());
-      expression = new DoubleColAddDoubleColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColAddRepeatingLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(),
-        getRepeatingLongColumnVector());
-      expression = new LongColAddLongColumn(0, 1, 2);
-    }
-  }
-
-
-  public static class DoubleColDivideDoubleColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getDoubleColumnVector(),
-        getDoubleColumnVector());
-      expression = new DoubleColDivideDoubleColumn(0, 1, 2);
-    }
-  }
-
-  public static class DoubleColDivideRepeatingDoubleColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getDoubleColumnVector(),
-        getRepeatingDoubleColumnVector());
-      expression = new DoubleColDivideDoubleColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColDivideLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getLongColumnVector(),
-        getLongColumnVector());
-      expression = new LongColDivideLongColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColDivideRepeatingLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getLongColumnVector(),
-        getRepeatingLongColumnVector());
-      expression = new LongColDivideLongColumn(0, 1, 2);
-    }
-  }
-
-  public static class ColAndColBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanLongColumnVector(),
-        getBooleanLongColumnVector());
-      expression = new ColAndCol(0, 1, 2);
-    }
-  }
-
-  public static class ColAndRepeatingColBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanLongColumnVector(),
-          getBooleanRepeatingLongColumnVector());
-      expression = new ColAndCol(0, 1, 2);
-    }
-  }
-
-  public static class RepeatingColAndColBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanRepeatingLongColumnVector(),
-          getBooleanLongColumnVector());
-      expression = new ColAndCol(0, 1, 2);
-    }
-  }
-
-  public static class ColOrColBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanLongColumnVector(),
-          getBooleanLongColumnVector());
-      expression = new ColOrCol(0, 1, 2);
-    }
-  }
-
-  public static class ColOrRepeatingColBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanLongColumnVector(),
-          getBooleanRepeatingLongColumnVector());
-      expression = new ColOrCol(0, 1, 2);
-    }
-  }
-
-  public static class RepeatingColOrColBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanRepeatingLongColumnVector(),
-          getBooleanLongColumnVector());
-      expression = new ColOrCol(0, 1, 2);
-    }
-  }
-
-  public static class NotColBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getBooleanLongColumnVector());
-      expression = new NotCol(0, 1);
-    }
-  }
-
-  public static class IfExprLongColumnLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 3, getBooleanLongColumnVector(),
-        getLongColumnVector(), getLongColumnVector());
-      expression = new IfExprLongColumnLongColumn(0, 1, 2, 3);
-    }
-  }
-
-  public static class IfExprRepeatingLongColumnLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 3, getBooleanLongColumnVector(),
-          getRepeatingLongColumnVector(), getLongColumnVector());
-      expression = new IfExprLongColumnLongColumn(0, 1, 2, 3);
-    }
-  }
-
-  public static class IfExprLongColumnRepeatingLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 3, getBooleanLongColumnVector(),
-          getLongColumnVector(), getRepeatingLongColumnVector());
-      expression = new IfExprLongColumnLongColumn(0, 1, 2, 3);
-    }
-  }
-
-  public static class LongColEqualLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
-      expression = new LongColEqualLongColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColGreaterEqualLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
-      expression = new LongColGreaterEqualLongColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColGreaterLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
-      expression = new LongColGreaterLongColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColLessEqualLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
-      expression = new LongColLessEqualLongColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColLessLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
-      expression = new LongColLessLongColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColNotEqualLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
-      expression = new LongColNotEqualLongColumn(0, 1, 2);
-    }
-  }
-
-  public static class LongColEqualLongScalarBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongColEqualLongScalar(0, 0, 1);
-    }
-  }
-
-  public static class LongColGreaterEqualLongScalarBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongColGreaterEqualLongScalar(0, 0, 1);
-    }
-  }
-
-  public static class LongColGreaterLongScalarBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongColGreaterLongScalar(0, 0, 1);
-    }
-  }
-
-  public static class LongColLessEqualLongScalarBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongColLessEqualLongScalar(0, 0, 1);
-    }
-  }
-
-  public static class LongColLessLongScalarBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongColLessLongScalar(0, 0, 1);
-    }
-  }
-
-  public static class LongColNotEqualLongScalarBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongColNotEqualLongScalar(0, 0, 1);
-    }
-  }
-
-
-  public static class LongScalarEqualLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongScalarEqualLongColumn(0, 0, 1);
-    }
-  }
-
-  public static class LongScalarGreaterEqualLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongScalarGreaterEqualLongColumn(0, 0, 1);
-    }
-  }
-
-  public static class LongScalarGreaterLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongScalarGreaterLongColumn(0, 0, 1);
-    }
-  }
-
-  public static class LongScalarLessEqualLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongScalarLessEqualLongColumn(0, 0, 1);
-    }
-  }
-
-  public static class LongScalarLessLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongScalarLessLongColumn(0, 0, 1);
-    }
-  }
-
-  public static class LongScalarNotEqualLongColumnBench extends AbstractExpression {
-    @Override
-    public void setup() {
-      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
-      expression = new LongScalarNotEqualLongColumn(0, 0, 1);
-    }
-  }
-
-  public static void main(String[] args) throws RunnerException {
-    Options opt = new OptionsBuilder().include(".*" + VectorizationBench.class.getSimpleName() +
-      ".*").build();
-    new Runner(opt).run();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9234712b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedArithmeticBench.java
----------------------------------------------------------------------
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedArithmeticBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedArithmeticBench.java
new file mode 100644
index 0000000..b6e2fec
--- /dev/null
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedArithmeticBench.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.benchmark.vectorization;
+
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColDivideLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.DoubleColAddDoubleColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.DoubleColDivideDoubleColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.LongColAddLongColumn;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.RunnerException;
+import org.openjdk.jmh.runner.options.Options;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+
+/**
+ * This test measures the performance of vectorized arithmetic expressions.
+ * <p/>
+ * This test uses the JMH framework for benchmarking.
+ * You may execute this benchmark tool from the JMH command line in several ways:
+ * <p/>
+ * To use the settings shown in the main() function, use:
+ * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedArithmeticBench
+ * <p/>
+ * To use the default settings used by JMH, use:
+ * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedArithmeticBench
+ * <p/>
+ * To specify different parameters, append JMH options to the command:
+ * - Benchmark mode (-bm). Available modes are:
+ * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
+ * - Output time unit (-tu). Available time units are: [m, s, ms, us, ns].
+ * <p/>
+ * For example, the command below uses 10 warm-up iterations, 5 test iterations, and 2 forks,
+ * and displays the Average Time (avgt) in Microseconds (us):
+ * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedArithmeticBench
+ * -wi 10 -i 5 -f 2 -bm avgt -tu us
+ */
+@State(Scope.Benchmark)
+public class VectorizedArithmeticBench {
+  public static class DoubleColAddRepeatingDoubleColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getDoubleColumnVector(),
+          getRepeatingDoubleColumnVector());
+      expression = new DoubleColAddDoubleColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColAddRepeatingLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(),
+          getRepeatingLongColumnVector());
+      expression = new LongColAddLongColumn(0, 1, 2);
+    }
+  }
+
+  public static class DoubleColDivideDoubleColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getDoubleColumnVector(),
+          getDoubleColumnVector());
+      expression = new DoubleColDivideDoubleColumn(0, 1, 2);
+    }
+  }
+
+  public static class DoubleColDivideRepeatingDoubleColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getDoubleColumnVector(),
+          getRepeatingDoubleColumnVector());
+      expression = new DoubleColDivideDoubleColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColDivideLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getLongColumnVector(),
+          getLongColumnVector());
+      expression = new LongColDivideLongColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColDivideRepeatingLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new DoubleColumnVector(), 2, getLongColumnVector(),
+          getRepeatingLongColumnVector());
+      expression = new LongColDivideLongColumn(0, 1, 2);
+    }
+  }
+
+  public static void main(String[] args) throws RunnerException {
+    Options opt = new OptionsBuilder().include(".*" + VectorizedArithmeticBench.class.getSimpleName() +
+        ".*").build();
+    new Runner(opt).run();
+  }
+}
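
As a sketch of what the documented flags map to programmatically (assuming the standard JMH
OptionsBuilder API; Mode and TimeUnit imports would additionally be required, and this is not
part of the patch), the main() above could equivalently be written as:

    // Equivalent of "-wi 10 -i 5 -f 2 -bm avgt -tu us" from the javadoc.
    public static void main(String[] args) throws RunnerException {
      Options opt = new OptionsBuilder()
          .include(".*" + VectorizedArithmeticBench.class.getSimpleName() + ".*")
          .warmupIterations(10)             // -wi 10
          .measurementIterations(5)         // -i 5
          .forks(2)                         // -f 2
          .mode(Mode.AverageTime)           // -bm avgt
          .timeUnit(TimeUnit.MICROSECONDS)  // -tu us
          .build();
      new Runner(opt).run();
    }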

http://git-wip-us.apache.org/repos/asf/hive/blob/9234712b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedComparisonBench.java
----------------------------------------------------------------------
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedComparisonBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedComparisonBench.java
new file mode 100644
index 0000000..536ef76
--- /dev/null
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedComparisonBench.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.benchmark.vectorization;
+
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColEqualLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColEqualLongScalar;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColGreaterEqualLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColGreaterEqualLongScalar;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColGreaterLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColGreaterLongScalar;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColLessEqualLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColLessEqualLongScalar;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColLessLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColLessLongScalar;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColNotEqualLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongColNotEqualLongScalar;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarEqualLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarGreaterEqualLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarGreaterLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarLessEqualLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarLessLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.LongScalarNotEqualLongColumn;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.RunnerException;
+import org.openjdk.jmh.runner.options.Options;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+
+/**
+ * This test measures the performance of vectorized comparison expressions.
+ * <p/>
+ * This test uses the JMH framework for benchmarking.
+ * You may execute this benchmark tool from the JMH command line in several ways:
+ * <p/>
+ * To use the settings shown in the main() function, use:
+ * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedComparisonBench
+ * <p/>
+ * To use the default settings used by JMH, use:
+ * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedComparisonBench
+ * <p/>
+ * To specify different parameters, append JMH options to the command:
+ * - Benchmark mode (-bm). Available modes are:
+ * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
+ * - Output time unit (-tu). Available time units are: [m, s, ms, us, ns].
+ * <p/>
+ * For example, the command below uses 10 warm-up iterations, 5 test iterations, and 2 forks,
+ * and displays the Average Time (avgt) in Microseconds (us):
+ * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedComparisonBench
+ * -wi 10 -i 5 -f 2 -bm avgt -tu us
+ */
+@State(Scope.Benchmark)
+public class VectorizedComparisonBench {
+  public static class LongColEqualLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
+      expression = new LongColEqualLongColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColGreaterEqualLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
+      expression = new LongColGreaterEqualLongColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColGreaterLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
+      expression = new LongColGreaterLongColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColLessEqualLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
+      expression = new LongColLessEqualLongColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColLessLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
+      expression = new LongColLessLongColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColNotEqualLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getLongColumnVector(), getLongColumnVector());
+      expression = new LongColNotEqualLongColumn(0, 1, 2);
+    }
+  }
+
+  public static class LongColEqualLongScalarBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongColEqualLongScalar(0, 0, 1);
+    }
+  }
+
+  public static class LongColGreaterEqualLongScalarBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongColGreaterEqualLongScalar(0, 0, 1);
+    }
+  }
+
+  public static class LongColGreaterLongScalarBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongColGreaterLongScalar(0, 0, 1);
+    }
+  }
+
+  public static class LongColLessEqualLongScalarBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongColLessEqualLongScalar(0, 0, 1);
+    }
+  }
+
+  public static class LongColLessLongScalarBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongColLessLongScalar(0, 0, 1);
+    }
+  }
+
+  public static class LongColNotEqualLongScalarBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongColNotEqualLongScalar(0, 0, 1);
+    }
+  }
+
+  public static class LongScalarEqualLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongScalarEqualLongColumn(0, 0, 1);
+    }
+  }
+
+  public static class LongScalarGreaterEqualLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongScalarGreaterEqualLongColumn(0, 0, 1);
+    }
+  }
+
+  public static class LongScalarGreaterLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongScalarGreaterLongColumn(0, 0, 1);
+    }
+  }
+
+  public static class LongScalarLessEqualLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongScalarLessEqualLongColumn(0, 0, 1);
+    }
+  }
+
+  public static class LongScalarLessLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongScalarLessLongColumn(0, 0, 1);
+    }
+  }
+
+  public static class LongScalarNotEqualLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getLongColumnVector());
+      expression = new LongScalarNotEqualLongColumn(0, 0, 1);
+    }
+  }
+
+  public static void main(String[] args) throws RunnerException {
+    Options opt = new OptionsBuilder().include(".*" + VectorizedComparisonBench.class.getSimpleName() +
+        ".*").build();
+    new Runner(opt).run();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9234712b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
----------------------------------------------------------------------
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
new file mode 100644
index 0000000..50dadb2
--- /dev/null
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.benchmark.vectorization;
+
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.ColAndCol;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.ColOrCol;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.IfExprLongColumnLongColumn;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.NotCol;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.RunnerException;
+import org.openjdk.jmh.runner.options.Options;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+
+/**
+ * This test measures the performance of vectorized logic expressions.
+ * <p/>
+ * This test uses the JMH framework for benchmarking.
+ * You may execute this benchmark tool from the JMH command line in several ways:
+ * <p/>
+ * To use the settings shown in the main() function, use:
+ * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLogicBench
+ * <p/>
+ * To use the default settings used by JMH, use:
+ * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLogicBench
+ * <p/>
+ * To specify different parameters, append JMH options to the command:
+ * - Benchmark mode (-bm). Available modes are:
+ * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
+ * - Output time unit (-tu). Available time units are: [m, s, ms, us, ns].
+ * <p/>
+ * For example, the command below uses 10 warm-up iterations, 5 test iterations, and 2 forks,
+ * and displays the Average Time (avgt) in Microseconds (us):
+ * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLogicBench
+ * -wi 10 -i 5 -f 2 -bm avgt -tu us
+ */
+@State(Scope.Benchmark)
+public class VectorizedLogicBench {
+
+  public static class ColAndColBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanLongColumnVector(),
+          getBooleanLongColumnVector());
+      expression = new ColAndCol(0, 1, 2);
+    }
+  }
+
+  public static class ColAndRepeatingColBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanLongColumnVector(),
+          getBooleanRepeatingLongColumnVector());
+      expression = new ColAndCol(0, 1, 2);
+    }
+  }
+
+  public static class RepeatingColAndColBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanRepeatingLongColumnVector(),
+          getBooleanLongColumnVector());
+      expression = new ColAndCol(0, 1, 2);
+    }
+  }
+
+  public static class ColOrColBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanLongColumnVector(),
+          getBooleanLongColumnVector());
+      expression = new ColOrCol(0, 1, 2);
+    }
+  }
+
+  public static class ColOrRepeatingColBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanLongColumnVector(),
+          getBooleanRepeatingLongColumnVector());
+      expression = new ColOrCol(0, 1, 2);
+    }
+  }
+
+  public static class RepeatingColOrColBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 2, getBooleanRepeatingLongColumnVector(),
+          getBooleanLongColumnVector());
+      expression = new ColOrCol(0, 1, 2);
+    }
+  }
+
+  public static class NotColBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 1, getBooleanLongColumnVector());
+      expression = new NotCol(0, 1);
+    }
+  }
+
+  public static class IfExprLongColumnLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 3, getBooleanLongColumnVector(),
+          getLongColumnVector(), getLongColumnVector());
+      expression = new IfExprLongColumnLongColumn(0, 1, 2, 3);
+    }
+  }
+
+  public static class IfExprRepeatingLongColumnLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 3, getBooleanLongColumnVector(),
+          getRepeatingLongColumnVector(), getLongColumnVector());
+      expression = new IfExprLongColumnLongColumn(0, 1, 2, 3);
+    }
+  }
+
+  public static class IfExprLongColumnRepeatingLongColumnBench extends AbstractExpression {
+    @Override
+    public void setup() {
+      rowBatch = buildRowBatch(new LongColumnVector(), 3, getBooleanLongColumnVector(),
+          getLongColumnVector(), getRepeatingLongColumnVector());
+      expression = new IfExprLongColumnLongColumn(0, 1, 2, 3);
+    }
+  }
+
+  public static void main(String[] args) throws RunnerException {
+    Options opt = new OptionsBuilder().include(".*" + VectorizedLogicBench.class.getSimpleName() +
+        ".*").build();
+    new Runner(opt).run();
+  }
+}
\ No newline at end of file


[48/55] [abbrv] hive git commit: HIVE-12309 : TableScan should use colStats when available for better data size estimate (Ashutosh Chauhan via Prasanth J)

Posted by xu...@apache.org.
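
For context, the "Column stats: COMPLETE" annotations in the plan diffs below mean column-level
statistics were available to the TableScan; in Hive those are produced by the column-stats
variant of ANALYZE, e.g. (table name taken from the diffs below):

    ANALYZE TABLE cbo_t1 COMPUTE STATISTICS FOR COLUMNS;

With such stats available, the estimated Data size appears to be derived from per-column average
sizes rather than raw file size, which is why the TableScan rows below change from 262 to 1674
(or 1602) bytes for the same 20 rows.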
http://git-wip-us.apache.org/repos/asf/hive/blob/4f7f8820/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index ee70033..fa29dfe 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -420,10 +420,10 @@ Stage-0
                         Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                         Select Operator [SEL_1]
                            outputColumnNames:["key","c_int","c_float"]
-                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_0]
                               alias:cbo_t1
-                              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x
 PREHOOK: type: QUERY
@@ -486,10 +486,10 @@ Stage-0
                                     Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                                     Select Operator [SEL_1]
                                        outputColumnNames:["key","c_int","c_float"]
-                                       Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                       Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                                        TableScan [TS_0]
                                           alias:cbo_t1
-                                          Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                          Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
 PREHOOK: type: QUERY
@@ -571,7 +571,7 @@ Stage-0
                                     |           Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
                                     |           TableScan [TS_29]
                                     |              alias:cbo_t3
-                                    |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Reducer 4 [SIMPLE_EDGE]
                                        Reduce Output Operator [RS_32]
                                           key expressions:_col0 (type: string)
@@ -631,7 +631,7 @@ Stage-0
                                                 |                          Statistics:Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
                                                 |                          TableScan [TS_11]
                                                 |                             alias:cbo_t2
-                                                |                             Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                             Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                                                 |<-Reducer 3 [SIMPLE_EDGE]
                                                    Reduce Output Operator [RS_23]
                                                       key expressions:_col0 (type: string)
@@ -673,7 +673,7 @@ Stage-0
                                                                            Statistics:Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
                                                                            TableScan [TS_0]
                                                                               alias:cbo_t1
-                                                                              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                              Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by cbo_t3.c_int % c asc, cbo_t3.c_int desc
 PREHOOK: type: QUERY
@@ -751,7 +751,7 @@ Stage-0
                                     |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                                     |        TableScan [TS_28]
                                     |           alias:cbo_t3
-                                    |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |           Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Reducer 4 [SIMPLE_EDGE]
                                        Reduce Output Operator [RS_30]
                                           key expressions:_col0 (type: string)
@@ -814,7 +814,7 @@ Stage-0
                                                 |                             Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                                                 |                             TableScan [TS_0]
                                                 |                                alias:cbo_t1
-                                                |                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                                Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                                                 |<-Reducer 9 [SIMPLE_EDGE]
                                                    Reduce Output Operator [RS_24]
                                                       key expressions:_col0 (type: string)
@@ -847,7 +847,7 @@ Stage-0
                                                                         Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                                                                         TableScan [TS_13]
                                                                            alias:cbo_t2
-                                                                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                           Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
 PREHOOK: type: QUERY
@@ -914,7 +914,7 @@ Stage-0
                               |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                               |        TableScan [TS_23]
                               |           alias:cbo_t3
-                              |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              |           Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                               |<-Reducer 3 [SIMPLE_EDGE]
                               |  Reduce Output Operator [RS_25]
                               |     key expressions:_col0 (type: string)
@@ -959,7 +959,7 @@ Stage-0
                               |                             Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                               |                             TableScan [TS_0]
                               |                                alias:cbo_t1
-                              |                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              |                                Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                               |<-Reducer 7 [SIMPLE_EDGE]
                                  Reduce Output Operator [RS_26]
                                     key expressions:_col0 (type: string)
@@ -992,7 +992,7 @@ Stage-0
                                                       Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                                                       TableScan [TS_13]
                                                          alias:cbo_t2
-                                                         Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                         Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int
 PREHOOK: type: QUERY
@@ -1070,7 +1070,7 @@ Stage-0
                                     |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                                     |        TableScan [TS_26]
                                     |           alias:cbo_t3
-                                    |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |           Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Reducer 3 [SIMPLE_EDGE]
                                     |  Reduce Output Operator [RS_28]
                                     |     key expressions:_col0 (type: string)
@@ -1115,7 +1115,7 @@ Stage-0
                                     |                             Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                                     |                             TableScan [TS_0]
                                     |                                alias:cbo_t1
-                                    |                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |                                Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Reducer 9 [SIMPLE_EDGE]
                                        Reduce Output Operator [RS_29]
                                           key expressions:_col0 (type: string)
@@ -1160,7 +1160,7 @@ Stage-0
                                                                   Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                                                                   TableScan [TS_13]
                                                                      alias:cbo_t2
-                                                                     Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                     Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
 PREHOOK: type: QUERY
@@ -1230,7 +1230,7 @@ Stage-0
                               |           Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
                               |           TableScan [TS_27]
                               |              alias:cbo_t3
-                              |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              |              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                               |<-Reducer 3 [SIMPLE_EDGE]
                                  Reduce Output Operator [RS_30]
                                     key expressions:_col0 (type: string)
@@ -1284,7 +1284,7 @@ Stage-0
                                           |                       Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                                           |                       TableScan [TS_0]
                                           |                          alias:cbo_t1
-                                          |                          Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                          |                          Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                                           |<-Reducer 7 [SIMPLE_EDGE]
                                              Reduce Output Operator [RS_23]
                                                 key expressions:_col0 (type: string)
@@ -1317,7 +1317,7 @@ Stage-0
                                                                   Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                                                                   TableScan [TS_10]
                                                                      alias:cbo_t2
-                                                                     Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                     Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc
 PREHOOK: type: QUERY
@@ -1355,10 +1355,10 @@ Stage-0
                         Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Select Operator [SEL_1]
                            outputColumnNames:["key"]
-                           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_0]
                               alias:src
-                              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
 	UNION  ALL
@@ -1416,10 +1416,10 @@ Stage-0
                |                 Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                |                 Select Operator [SEL_1]
                |                    outputColumnNames:["key"]
-               |                    Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |                    Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                |                    TableScan [TS_0]
                |                       alias:s1
-               |                       Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |                       Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Reducer 6 [CONTAINS]
                |  Reduce Output Operator [RS_24]
                |     key expressions:_col0 (type: string)
@@ -1443,10 +1443,10 @@ Stage-0
                |                 Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                |                 Select Operator [SEL_8]
                |                    outputColumnNames:["key"]
-               |                    Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |                    Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                |                    TableScan [TS_7]
                |                       alias:s1
-               |                       Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |                       Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Reducer 8 [CONTAINS]
                   Reduce Output Operator [RS_24]
                      key expressions:_col0 (type: string)
@@ -1470,10 +1470,10 @@ Stage-0
                                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                                  Select Operator [SEL_17]
                                     outputColumnNames:["key"]
-                                    Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                    Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                                     TableScan [TS_16]
                                        alias:s1
-                                       Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                       Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
     UNION  ALL
@@ -1550,10 +1550,10 @@ Stage-0
                      |                    Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                      |                    Select Operator [SEL_1]
                      |                       outputColumnNames:["key"]
-                     |                       Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                     |                       Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                      |                       TableScan [TS_0]
                      |                          alias:s1
-                     |                          Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                     |                          Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                      |<-Reducer 7 [CONTAINS]
                      |  Reduce Output Operator [RS_26]
                      |     key expressions:_col0 (type: string)
@@ -1584,10 +1584,10 @@ Stage-0
                      |                    Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                      |                    Select Operator [SEL_8]
                      |                       outputColumnNames:["key"]
-                     |                       Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                     |                       Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                      |                       TableScan [TS_7]
                      |                          alias:s1
-                     |                          Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                     |                          Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                      |<-Reducer 9 [CONTAINS]
                         Reduce Output Operator [RS_26]
                            key expressions:_col0 (type: string)
@@ -1618,10 +1618,10 @@ Stage-0
                                           Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                                           Select Operator [SEL_17]
                                              outputColumnNames:["key"]
-                                             Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                             Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                                              TableScan [TS_16]
                                                 alias:s1
-                                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
 PREHOOK: type: QUERY
@@ -1660,7 +1660,7 @@ Stage-0
             |           Statistics:Num rows: 6 Data size: 425 Basic stats: COMPLETE Column stats: COMPLETE
             |           TableScan [TS_0]
             |              alias:cbo_t1
-            |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+            |              Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 3 [SIMPLE_EDGE]
                Reduce Output Operator [RS_9]
                   key expressions:_col0 (type: string)
@@ -1675,7 +1675,7 @@ Stage-0
                         Statistics:Num rows: 6 Data size: 425 Basic stats: COMPLETE Column stats: COMPLETE
                         TableScan [TS_3]
                            alias:cbo_t3
-                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join  cbo_t2 on cbo_t1.key=cbo_t2.key
 PREHOOK: type: QUERY
@@ -1715,7 +1715,7 @@ Stage-0
                |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                |        TableScan [TS_0]
                |           alias:cbo_t1
-               |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |           Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 3 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_5]
                      key expressions:_col0 (type: string)
@@ -1728,7 +1728,7 @@ Stage-0
                         Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                         TableScan [TS_2]
                            alias:cbo_t2
-                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join  cbo_t2 on cbo_t1.key=cbo_t2.key
 PREHOOK: type: QUERY
@@ -1768,7 +1768,7 @@ Stage-0
                |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                |        TableScan [TS_0]
                |           alias:cbo_t1
-               |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |           Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 3 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_5]
                      key expressions:_col0 (type: string)
@@ -1781,7 +1781,7 @@ Stage-0
                         Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                         TableScan [TS_2]
                            alias:cbo_t2
-                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
 PREHOOK: type: QUERY
@@ -1824,7 +1824,7 @@ Stage-0
                |           Statistics:Num rows: 18 Data size: 1488 Basic stats: COMPLETE Column stats: COMPLETE
                |           TableScan [TS_0]
                |              alias:cbo_t1
-               |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |              Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 3 [SIMPLE_EDGE]
                |  Reduce Output Operator [RS_9]
                |     key expressions:_col0 (type: string)
@@ -1840,7 +1840,7 @@ Stage-0
                |           Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
                |           TableScan [TS_2]
                |              alias:cbo_t3
-               |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 4 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_11]
                      key expressions:_col0 (type: string)
@@ -1856,7 +1856,7 @@ Stage-0
                            Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_4]
                               alias:cbo_t2
-                              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
 PREHOOK: type: QUERY
@@ -1899,7 +1899,7 @@ Stage-0
                |           Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
                |           TableScan [TS_0]
                |              alias:cbo_t1
-               |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 3 [SIMPLE_EDGE]
                |  Reduce Output Operator [RS_9]
                |     key expressions:_col0 (type: string)
@@ -1914,7 +1914,7 @@ Stage-0
                |           Statistics:Num rows: 18 Data size: 1360 Basic stats: COMPLETE Column stats: COMPLETE
                |           TableScan [TS_2]
                |              alias:cbo_t3
-               |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |              Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 4 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_11]
                      key expressions:_col0 (type: string)
@@ -1930,7 +1930,7 @@ Stage-0
                            Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_4]
                               alias:cbo_t2
-                              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where  (q + 1 = 2) and (R.b > 0 or c_int >= 0)
 PREHOOK: type: QUERY
@@ -1977,7 +1977,7 @@ Stage-0
                   |           Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
                   |           TableScan [TS_11]
                   |              alias:cbo_t3
-                  |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                  |              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Reducer 2 [SIMPLE_EDGE]
                      Reduce Output Operator [RS_14]
                         key expressions:_col0 (type: string)
@@ -2008,7 +2008,7 @@ Stage-0
                            |           Statistics:Num rows: 6 Data size: 465 Basic stats: COMPLETE Column stats: COMPLETE
                            |           TableScan [TS_0]
                            |              alias:cbo_t1
-                           |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           |              Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                            |<-Map 4 [SIMPLE_EDGE]
                               Reduce Output Operator [RS_7]
                                  key expressions:_col0 (type: string)
@@ -2024,7 +2024,7 @@ Stage-0
                                        Statistics:Num rows: 6 Data size: 465 Basic stats: COMPLETE Column stats: COMPLETE
                                        TableScan [TS_3]
                                           alias:cbo_t2
-                                          Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                          Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where  (q + 1 = 2) and (R.b > 0 or c_int >= 0)
 PREHOOK: type: QUERY
@@ -2070,7 +2070,7 @@ Stage-0
                   |           Statistics:Num rows: 6 Data size: 465 Basic stats: COMPLETE Column stats: COMPLETE
                   |           TableScan [TS_0]
                   |              alias:cbo_t1
-                  |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                  |              Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Map 3 [SIMPLE_EDGE]
                   |  Reduce Output Operator [RS_9]
                   |     key expressions:_col0 (type: string)
@@ -2086,7 +2086,7 @@ Stage-0
                   |           Statistics:Num rows: 6 Data size: 465 Basic stats: COMPLETE Column stats: COMPLETE
                   |           TableScan [TS_3]
                   |              alias:cbo_t2
-                  |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                  |              Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Map 4 [SIMPLE_EDGE]
                      Reduce Output Operator [RS_10]
                         key expressions:_col0 (type: string)
@@ -2099,7 +2099,7 @@ Stage-0
                            Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_6]
                               alias:cbo_t3
-                              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1
 PREHOOK: type: QUERY
@@ -2154,10 +2154,10 @@ Stage-0
                                  Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                                  Select Operator [SEL_1]
                                     outputColumnNames:["key","c_int","c_float"]
-                                    Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                    Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                                     TableScan [TS_0]
                                        alias:cbo_t1
-                                       Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                       Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1
 PREHOOK: type: QUERY
@@ -2233,10 +2233,10 @@ Stage-0
                                              Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                                              Select Operator [SEL_1]
                                                 outputColumnNames:["key","c_int","c_float"]
-                                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                                                 TableScan [TS_0]
                                                    alias:cbo_t1
-                                                   Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                   Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select key from(select key from (select key from cbo_t1 limit 5)cbo_t2  limit 5)cbo_t3  limit 5
 PREHOOK: type: QUERY
@@ -2293,7 +2293,7 @@ Stage-0
                                           Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                                           TableScan [TS_0]
                                              alias:cbo_t1
-                                             Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                             Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  order by c_int limit 5
 PREHOOK: type: QUERY
@@ -2356,7 +2356,7 @@ Stage-0
                                           Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                                           TableScan [TS_0]
                                              alias:cbo_t1
-                                             Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                             Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5
 PREHOOK: type: QUERY
@@ -2441,7 +2441,7 @@ Stage-0
                                        |           Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
                                        |           TableScan [TS_31]
                                        |              alias:cbo_t3
-                                       |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                       |              Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
                                        |<-Reducer 4 [SIMPLE_EDGE]
                                           Reduce Output Operator [RS_34]
                                              key expressions:_col0 (type: string)
@@ -2507,7 +2507,7 @@ Stage-0
                                                    |                                Statistics:Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE
                                                    |                                TableScan [TS_12]
                                                    |                                   alias:cbo_t2
-                                                   |                                   Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                   |                                   Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                                                    |<-Reducer 3 [SIMPLE_EDGE]
                                                       Reduce Output Operator [RS_25]
                                                          key expressions:_col0 (type: string)
@@ -2555,7 +2555,7 @@ Stage-0
                                                                                     Statistics:Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE
                                                                                     TableScan [TS_0]
                                                                                        alias:cbo_t1
-                                                                                       Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                                       Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t1.c_int           from cbo_t1 left semi join   cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)
 PREHOOK: type: QUERY
@@ -2598,7 +2598,7 @@ Stage-0
                |           Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE
                |           TableScan [TS_0]
                |              alias:cbo_t1
-               |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |              Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 3 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_10]
                      key expressions:_col0 (type: string)
@@ -2617,7 +2617,7 @@ Stage-0
                               Statistics:Num rows: 18 Data size: 1360 Basic stats: COMPLETE Column stats: COMPLETE
                               TableScan [TS_3]
                                  alias:cbo_t2
-                                 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                 Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where  (b + 1 = 2) and (R.b > 0 or c >= 0)
 PREHOOK: type: QUERY
@@ -2660,7 +2660,7 @@ Stage-0
                |           Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE
                |           TableScan [TS_0]
                |              alias:cbo_t1
-               |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |              Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 3 [SIMPLE_EDGE]
                |  Reduce Output Operator [RS_19]
                |     key expressions:_col0 (type: string)
@@ -2679,7 +2679,7 @@ Stage-0
                |              Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE
                |              TableScan [TS_7]
                |                 alias:cbo_t2
-               |                 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+               |                 Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 4 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_21]
                      key expressions:_col0 (type: string)
@@ -2698,7 +2698,7 @@ Stage-0
                               Statistics:Num rows: 18 Data size: 1360 Basic stats: COMPLETE Column stats: COMPLETE
                               TableScan [TS_10]
                                  alias:cbo_t3
-                                 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                 Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
 PREHOOK: type: QUERY
@@ -2773,7 +2773,7 @@ Stage-0
                            |              Statistics:Num rows: 6 Data size: 425 Basic stats: COMPLETE Column stats: COMPLETE
                            |              TableScan [TS_32]
                            |                 alias:cbo_t3
-                           |                 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           |                 Statistics:Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
                            |<-Reducer 3 [SIMPLE_EDGE]
                            |  Reduce Output Operator [RS_40]
                            |     key expressions:_col0 (type: string)
@@ -2827,7 +2827,7 @@ Stage-0
                            |                                      Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                            |                                      TableScan [TS_0]
                            |                                         alias:cbo_t1
-                           |                                         Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           |                                         Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
                            |<-Reducer 9 [SIMPLE_EDGE]
                               Reduce Output Operator [RS_42]
                                  key expressions:_col0 (type: string)
@@ -2874,7 +2874,7 @@ Stage-0
                                                             Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                                                             TableScan [TS_17]
                                                                alias:cbo_t2
-                                                               Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                               Statistics:Num rows: 20 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1
 PREHOOK: type: QUERY
@@ -3032,7 +3032,7 @@ Stage-0
                   |        Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   |        TableScan [TS_0]
                   |           alias:b
-                  |           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  |           Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Reducer 4 [SIMPLE_EDGE]
                      Reduce Output Operator [RS_11]
                         key expressions:_col1 (type: string)
@@ -3061,7 +3061,7 @@ Stage-0
                                        Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
                                        TableScan [TS_3]
                                           alias:b
-                                          Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                          Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select * 
 from src_cbo b 
@@ -3121,7 +3121,7 @@ Stage-0
                   |           Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
                   |           TableScan [TS_7]
                   |              alias:b
-                  |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  |              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Reducer 2 [SIMPLE_EDGE]
                      Reduce Output Operator [RS_10]
                         key expressions:_col1 (type: string), _col0 (type: string)
@@ -3144,10 +3144,10 @@ Stage-0
                                  Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
                                  Select Operator [SEL_2]
                                     outputColumnNames:["key","value"]
-                                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                    Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                                     TableScan [TS_0]
                                        alias:b
-                                       Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                       Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: create view cv1 as 
 select * 
@@ -3208,7 +3208,7 @@ Stage-0
             |           Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
             |           TableScan [TS_0]
             |              alias:b
-            |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            |              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 3 [SIMPLE_EDGE]
                Reduce Output Operator [RS_13]
                   key expressions:_col0 (type: string), _col1 (type: string)
@@ -3227,7 +3227,7 @@ Stage-0
                            Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_5]
                               alias:b
-                              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select * 
 from (select * 
@@ -3280,7 +3280,7 @@ Stage-0
             |           Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
             |           TableScan [TS_0]
             |              alias:b
-            |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            |              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 3 [SIMPLE_EDGE]
                Reduce Output Operator [RS_13]
                   key expressions:_col0 (type: string), _col1 (type: string)
@@ -3299,7 +3299,7 @@ Stage-0
                            Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_5]
                               alias:b
-                              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select * 
 from src_cbo 
@@ -3343,7 +3343,7 @@ Stage-0
             |           Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
             |           TableScan [TS_0]
             |              alias:src_cbo
-            |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            |              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 3 [SIMPLE_EDGE]
                Reduce Output Operator [RS_13]
                   key expressions:_col0 (type: string)
@@ -3362,7 +3362,7 @@ Stage-0
                            Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_5]
                               alias:src_cbo
-                              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
@@ -3425,7 +3425,7 @@ Stage-0
                |     |           Statistics:Num rows: 16 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                |     |           TableScan [TS_0]
                |     |              alias:lineitem
-               |     |              Statistics:Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: COMPLETE
+               |     |              Statistics:Num rows: 100 Data size: 1600 Basic stats: COMPLETE Column stats: COMPLETE
                |     |<-Map 4 [SIMPLE_EDGE]
                |        Reduce Output Operator [RS_19]
                |           key expressions:_col0 (type: int)
@@ -3444,7 +3444,7 @@ Stage-0
                |                    Statistics:Num rows: 14 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE
                |                    TableScan [TS_3]
                |                       alias:lineitem
-               |                       Statistics:Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: COMPLETE
+               |                       Statistics:Num rows: 100 Data size: 9600 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Reducer 6 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_24]
                      key expressions:_col0 (type: int)
@@ -3470,7 +3470,7 @@ Stage-0
                                  Statistics:Num rows: 100 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE
                                  TableScan [TS_8]
                                     alias:lineitem
-                                    Statistics:Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: COMPLETE
+                                    Statistics:Num rows: 100 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select key, value, count(*) 
 from src_cbo b
@@ -3553,7 +3553,7 @@ Stage-0
             |                 |           Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
             |                 |           TableScan [TS_0]
             |                 |              alias:b
-            |                 |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            |                 |              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             |                 |<-Map 5 [SIMPLE_EDGE]
             |                    Reduce Output Operator [RS_15]
             |                       key expressions:_col0 (type: string)
@@ -3572,7 +3572,7 @@ Stage-0
             |                                Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
             |                                TableScan [TS_7]
             |                                   alias:b
-            |                                   Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            |                                   Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Reducer 7 [SIMPLE_EDGE]
                Reduce Output Operator [RS_34]
                   key expressions:_col0 (type: bigint)
@@ -3614,7 +3614,7 @@ Stage-0
                                           Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                                           TableScan [TS_22]
                                              alias:b
-                                             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                             Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select p_mfgr, p_name, avg(p_size) 
 from part 
@@ -3681,7 +3681,7 @@ Stage-0
             |                    Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
             |                    TableScan [TS_0]
             |                       alias:part
-            |                       Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+            |                       Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Reducer 5 [SIMPLE_EDGE]
                Reduce Output Operator [RS_18]
                   key expressions:_col0 (type: string)
@@ -3709,11 +3709,11 @@ Stage-0
                                     key expressions:p_mfgr (type: string), p_size (type: int)
                                     Map-reduce partition columns:p_mfgr (type: string)
                                     sort order:++
-                                    Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                                    Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
                                     value expressions:p_name (type: string)
                                     TableScan [TS_7]
                                        alias:part
-                                       Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                                       Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select * 
 from src_cbo 
@@ -3780,7 +3780,7 @@ Stage-0
                         |           Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                         |           TableScan [TS_14]
                         |              alias:src_cbo
-                        |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                        |              Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                         |<-Reducer 2 [SIMPLE_EDGE]
                            Reduce Output Operator [RS_21]
                               key expressions:_col0 (type: string)
@@ -3803,7 +3803,7 @@ Stage-0
                               |        Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                               |        TableScan [TS_0]
                               |           alias:src_cbo
-                              |           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                              |           Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                               |<-Reducer 6 [SIMPLE_EDGE]
                                  Reduce Output Operator [RS_19]
                                     sort order:
@@ -3833,7 +3833,7 @@ Stage-0
                                                          Statistics:Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
                                                          TableScan [TS_3]
                                                             alias:src_cbo
-                                                            Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                                            Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select p_mfgr, b.p_name, p_size 
 from part b 
@@ -3892,7 +3892,7 @@ Stage-0
                   |           Statistics:Num rows: 8 Data size: 1784 Basic stats: COMPLETE Column stats: COMPLETE
                   |           TableScan [TS_14]
                   |              alias:b
-                  |              Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                  |              Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Reducer 2 [SIMPLE_EDGE]
                      Reduce Output Operator [RS_21]
                         key expressions:_col0 (type: string), _col1 (type: string)
@@ -3915,7 +3915,7 @@ Stage-0
                         |        Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
                         |        TableScan [TS_0]
                         |           alias:b
-                        |           Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                        |           Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
                         |<-Reducer 5 [SIMPLE_EDGE]
                            Reduce Output Operator [RS_19]
                               sort order:
@@ -3945,7 +3945,7 @@ Stage-0
                                                    Statistics:Num rows: 1 Data size: 223 Basic stats: COMPLETE Column stats: COMPLETE
                                                    TableScan [TS_3]
                                                       alias:b
-                                                      Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                                                      Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select p_name, p_size 
 from 
@@ -4020,7 +4020,7 @@ Stage-0
                   |        |        Statistics:Num rows: 26 Data size: 3250 Basic stats: COMPLETE Column stats: COMPLETE
                   |        |        TableScan [TS_0]
                   |        |           alias:part
-                  |        |           Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                  |        |           Statistics:Num rows: 26 Data size: 3250 Basic stats: COMPLETE Column stats: COMPLETE
                   |        |<-Reducer 6 [SIMPLE_EDGE]
                   |           Reduce Output Operator [RS_28]
                   |              key expressions:_col0 (type: double)
@@ -4045,7 +4045,7 @@ Stage-0
                   |                          Statistics:Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   |                          TableScan [TS_3]
                   |                             alias:part
-                  |                             Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                  |                             Statistics:Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Reducer 8 [SIMPLE_EDGE]
                      Reduce Output Operator [RS_31]
                         sort order:
@@ -4082,7 +4082,7 @@ Stage-0
                                                    Statistics:Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                                                    TableScan [TS_10]
                                                       alias:part
-                                                      Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                                                      Statistics:Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select b.p_mfgr, min(p_retailprice) 
 from part b 
@@ -4175,7 +4175,7 @@ Stage-0
                         |                    Statistics:Num rows: 5 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                         |                    TableScan [TS_22]
                         |                       alias:b
-                        |                       Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                        |                       Statistics:Num rows: 26 Data size: 2756 Basic stats: COMPLETE Column stats: COMPLETE
                         |<-Reducer 3 [SIMPLE_EDGE]
                            Reduce Output Operator [RS_33]
                               key expressions:_col0 (type: string), _col1 (type: double)
@@ -4211,10 +4211,10 @@ Stage-0
                               |              Statistics:Num rows: 5 Data size: 530 Basic stats: COMPLETE Column stats: COMPLETE
                               |              Select Operator [SEL_2]
                               |                 outputColumnNames:["p_mfgr","p_retailprice"]
-                              |                 Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                              |                 Statistics:Num rows: 26 Data size: 2756 Basic stats: COMPLETE Column stats: COMPLETE
                               |                 TableScan [TS_0]
                               |                    alias:b
-                              |                    Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                              |                    Statistics:Num rows: 26 Data size: 2756 Basic stats: COMPLETE Column stats: COMPLETE
                               |<-Reducer 8 [SIMPLE_EDGE]
                                  Reduce Output Operator [RS_31]
                                     sort order:
@@ -4261,10 +4261,10 @@ Stage-0
                                                                   Statistics:Num rows: 5 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
                                                                   Select Operator [SEL_8]
                                                                      outputColumnNames:["p_mfgr","p_retailprice"]
-                                                                     Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                     Statistics:Num rows: 26 Data size: 2756 Basic stats: COMPLETE Column stats: COMPLETE
                                                                      TableScan [TS_7]
                                                                         alias:b
-                                                                        Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                        Statistics:Num rows: 26 Data size: 2756 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1
 PREHOOK: type: QUERY
@@ -4298,11 +4298,11 @@ Stage-0
                         key expressions:0 (type: int)
                         Map-reduce partition columns:0 (type: int)
                         sort order:+
-                        Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics:Num rows: 20 Data size: 144 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions:c_int (type: int), c_float (type: float)
                         TableScan [TS_0]
                            alias:cbo_t1
-                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 20 Data size: 144 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1
 PREHOOK: type: QUERY
@@ -4336,11 +4336,11 @@ Stage-0
                         key expressions:0 (type: int)
                         Map-reduce partition columns:0 (type: int)
                         sort order:+
-                        Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics:Num rows: 20 Data size: 144 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions:c_int (type: int), c_float (type: float)
                         TableScan [TS_0]
                            alias:cbo_t1
-                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 20 Data size: 144 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1
 PREHOOK: type: QUERY
@@ -4374,11 +4374,11 @@ Stage-0
                         key expressions:key (type: string), value (type: string)
                         Map-reduce partition columns:key (type: string)
                         sort order:++
-                        Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics:Num rows: 20 Data size: 3204 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions:c_int (type: int), c_float (type: float)
                         TableScan [TS_0]
                            alias:cbo_t1
-                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 20 Data size: 3204 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select *, rank() over(partition by key order by value) as rr from src1
 PREHOOK: type: QUERY
@@ -4412,10 +4412,10 @@ Stage-0
                         key expressions:key (type: string), value (type: string)
                         Map-reduce partition columns:key (type: string)
                         sort order:++
-                        Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics:Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
                         TableScan [TS_0]
                            alias:src1
-                           Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain
 select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
@@ -4500,7 +4500,7 @@ Stage-0
                                     |           Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                                     |           TableScan [TS_0]
                                     |              alias:y
-                                    |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |              Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Map 5 [SIMPLE_EDGE]
                                        Reduce Output Operator [RS_7]
                                           key expressions:_col0 (type: string)
@@ -4515,7 +4515,7 @@ Stage-0
                                                 Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
                                                 TableScan [TS_2]
                                                    alias:x
-                                                   Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE
+                                                   Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain
 select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
@@ -4600,7 +4600,7 @@ Stage-0
                                     |           Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                                     |           TableScan [TS_0]
                                     |              alias:y
-                                    |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |              Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Map 5 [SIMPLE_EDGE]
                                        Reduce Output Operator [RS_7]
                                           key expressions:_col0 (type: string)
@@ -4615,7 +4615,7 @@ Stage-0
                                                 Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
                                                 TableScan [TS_2]
                                                    alias:x
-                                                   Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE
+                                                   Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain
 select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
@@ -4701,7 +4701,7 @@ Stage-0
                                     |           Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
                                     |           TableScan [TS_2]
                                     |              alias:x
-                                    |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |              Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Select Operator [SEL_1]
                                           outputColumnNames:["_col0"]
                                           Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
@@ -4710,7 +4710,7 @@ Stage-0
                                              Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                                              TableScan [TS_0]
                                                 alias:y
-                                                Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                                Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain
 select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
@@ -4792,7 +4792,7 @@ Stage-0
                                  |           Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
                                  |           TableScan [TS_0]
                                  |              alias:x
-                                 |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE
+                                 |              Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
                                  |<-Map 5 [SIMPLE_EDGE]
                                     Reduce Output Operator [RS_9]
                                        key expressions:_col0 (type: string)
@@ -4811,7 +4811,7 @@ Stage-0
                                                 Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                                                 TableScan [TS_2]
                                                    alias:y
-                                                   Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                                   Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain create table abcd (a int, b int, c int, d int)
 PREHOOK: type: CREATETABLE
@@ -5198,7 +5198,7 @@ Stage-0
             |        Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             |        TableScan [TS_0]
             |           alias:src
-            |           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            |           Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 3 [SIMPLE_EDGE]
                Reduce Output Operator [RS_6]
                   sort order:
@@ -5281,7 +5281,7 @@ Stage-3
                                              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                                              TableScan [TS_0]
                                                 alias:src
-                                                Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                                Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Stage-0
                Move Operator
                    Please refer to the previous Stage-1
@@ -5349,7 +5349,7 @@ Stage-3
                                              Statistics:Num rows: 500 Data size: 96000 Basic stats: COMPLETE Column stats: COMPLETE
                                              TableScan [TS_0]
                                                 alias:src
-                                                Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                                Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Stage-0
                Move Operator
                    Please refer to the previous Stage-1
@@ -5451,7 +5451,7 @@ Stage-0
                   |           Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
                   |           TableScan [TS_0]
                   |              alias:src
-                  |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  |              Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Map 4 [SIMPLE_EDGE]
                      Reduce Output Operator [RS_7]
                         sort order:
@@ -5465,7 +5465,7 @@ Stage-0
                               Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
                               TableScan [TS_3]
                                  alias:src
-                                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                                 Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: CREATE TABLE myinput1(key int, value int)
 PREHOOK: type: CREATETABLE
@@ -6437,7 +6437,7 @@ Stage-0
             |     |           Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
             |     |           TableScan [TS_3]
             |     |              alias:src1
-            |     |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE
+            |     |              Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
             |     |<-Select Operator [SEL_2]
             |           outputColumnNames:["_col0","_col1"]
             |           Statistics:Num rows: 666 Data size: 118548 Basic stats: COMPLETE Column stats: COMPLETE
@@ -6446,7 +6446,7 @@ Stage-0
             |              Statistics:Num rows: 666 Data size: 118548 Basic stats: COMPLETE Column stats: COMPLETE
             |              TableScan [TS_0]
             |                 alias:srcpart
-            |                 Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+            |                 Statistics:Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 4 [SIMPLE_EDGE]
                Reduce Output Operator [RS_16]
                   key expressions:_col0 (type: string)
@@ -6461,7 +6461,7 @@ Stage-0
                         Statistics:Num rows: 166 Data size: 15106 Basic stats: COMPLETE Column stats: COMPLETE
                         TableScan [TS_5]
                            alias:src
-                           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450'
 PREHOOK: type: QUERY
@@ -6514,7 +6514,7 @@ Stage-0
             |     |           Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
             |     |           TableScan [TS_3]
             |     |              alias:src1
-            |     |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE
+            |     |              Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
             |     |<-Select Operator [SEL_2]
             |           outputColumnNames:["_col0","_col1"]
             |           Statistics:Num rows: 666 Data size: 118548 Basic stats: COMPLETE Column stats: COMPLETE
@@ -6523,7 +6523,7 @@ Stage-0
             |              Statistics:Num rows: 666 Data size: 118548 Basic stats: COMPLETE Column stats: COMPLETE
             |              TableScan [TS_0]
             |                 alias:srcpart
-            |                 Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+            |                 Statistics:Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 4 [SIMPLE_EDGE]
                Reduce Output Operator [RS_16]
                   key expressions:_col0 (type: string)
@@ -6538,7 +6538,7 @@ Stage-0
                         Statistics:Num rows: 166 Data size: 15106 Basic stats: COMPLETE Column stats: COMPLETE
                         TableScan [TS_5]
                            alias:src
-                           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                           Statistics:Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE

<TRUNCATED>