Posted to commits@hive.apache.org by vg...@apache.org on 2018/02/07 19:12:42 UTC

[1/5] hive git commit: HIVE-16605: Enforce NOT NULL constraint (Vineet Garg, reviewed by Ashutosh Chauhan)

Repository: hive
Updated Branches:
  refs/heads/master 31207eded -> c50ebb34b


http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientpositive/show_functions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_functions.q.out b/ql/src/test/results/clientpositive/show_functions.q.out
index 43e4a5d..fcbf2c5 100644
--- a/ql/src/test/results/clientpositive/show_functions.q.out
+++ b/ql/src/test/results/clientpositive/show_functions.q.out
@@ -81,6 +81,7 @@ div
 e
 elt
 encode
+enforce_constraint
 ewah_bitmap
 ewah_bitmap_and
 ewah_bitmap_empty
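
Note on the new builtin registered above: enforce_constraint is the hook the planner uses to implement NOT NULL ENFORCED. As the plans in part [2/5] show, a Filter Operator with a predicate of the form enforce_constraint(<col> is not null) is placed ahead of the File Output Operator that writes the target table. A minimal sketch of the DDL and DML exercised by the new q.out (this mirrors the statements already in the test, it is not an addition to the patch):

    CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED);
    -- The plan for this insert carries the runtime check shown in the q.out below:
    -- enforce_constraint(_col0 is not null) and enforce_constraint(_col2 is not null)
    INSERT INTO table1 VALUES ('not', 'null', 'constraint');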


[2/5] hive git commit: HIVE-16605: Enforce NOT NULL constraint (Vineet Garg, reviewed by Ashutosh Chauhan)

Posted by vg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
new file mode 100644
index 0000000..7b1dd40
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -0,0 +1,6010 @@
+PREHOOK: query: CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table1
+POSTHOOK: query: CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table1
+PREHOOK: query: explain INSERT INTO table1 values('not', 'null', 'constraint')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain INSERT INTO table1 values('not', 'null', 'constraint')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct('not','null','constraint')) (type: array<struct<col1:string,col2:string,col3:string>>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: string), col2 (type: string), col3 (type: string)
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Filter Operator
+                          predicate: (enforce_constraint(_col0 is not null) and enforce_constraint(_col2 is not null)) (type: boolean)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                name: default.table1
+                          Select Operator
+                            expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                            outputColumnNames: a, b, c
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            Group By Operator
+                              aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                              mode: hash
+                              outputColumnNames: _col0, _col1, _col2
+                              Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                              Reduce Output Operator
+                                sort order: 
+                                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                                value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: INSERT INTO table1 values('not', 'null', 'constraint')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@table1
+POSTHOOK: query: INSERT INTO table1 values('not', 'null', 'constraint')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@table1
+POSTHOOK: Lineage: table1.a SCRIPT []
+POSTHOOK: Lineage: table1.b SCRIPT []
+POSTHOOK: Lineage: table1.c SCRIPT []
+PREHOOK: query: SELECT * FROM table1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@table1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM table1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@table1
+#### A masked pattern was here ####
+not	null	constraint
+PREHOOK: query: explain insert into table1(a,c) values('1','2')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table1(a,c) values('1','2')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct('1','2')) (type: array<struct<col1:string,col2:string>>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: string), null (type: string), col2 (type: string)
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Filter Operator
+                          predicate: (enforce_constraint(_col0 is not null) and enforce_constraint(_col2 is not null)) (type: boolean)
+                          Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                name: default.table1
+                          Select Operator
+                            expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                            outputColumnNames: a, b, c
+                            Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                            Group By Operator
+                              aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                              mode: hash
+                              outputColumnNames: _col0, _col1, _col2
+                              Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                              Reduce Output Operator
+                                sort order: 
+                                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                                value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: insert into table1(a,c) values('1','2')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@table1
+POSTHOOK: query: insert into table1(a,c) values('1','2')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@table1
+POSTHOOK: Lineage: table1.a SCRIPT []
+POSTHOOK: Lineage: table1.b SIMPLE []
+POSTHOOK: Lineage: table1.c SCRIPT []
+PREHOOK: query: explain INSERT INTO table1 select key, src.value, value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain INSERT INTO table1 select key, src.value, value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (enforce_constraint(key is not null) and enforce_constraint(value is not null)) (type: boolean)
+                    Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.table1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                        outputColumnNames: a, b, c
+                        Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: INSERT INTO table1 select key, src.value, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@table1
+POSTHOOK: query: INSERT INTO table1 select key, src.value, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@table1
+POSTHOOK: Lineage: table1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: table1.c SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM table1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@table1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM table1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@table1
+#### A masked pattern was here ####
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+1	NULL	2
+10	val_10	val_10
+100	val_100	val_100
+100	val_100	val_100
+103	val_103	val_103
+103	val_103	val_103
+104	val_104	val_104
+104	val_104	val_104
+105	val_105	val_105
+11	val_11	val_11
+111	val_111	val_111
+113	val_113	val_113
+113	val_113	val_113
+114	val_114	val_114
+116	val_116	val_116
+118	val_118	val_118
+118	val_118	val_118
+119	val_119	val_119
+119	val_119	val_119
+119	val_119	val_119
+12	val_12	val_12
+12	val_12	val_12
+120	val_120	val_120
+120	val_120	val_120
+125	val_125	val_125
+125	val_125	val_125
+126	val_126	val_126
+128	val_128	val_128
+128	val_128	val_128
+128	val_128	val_128
+129	val_129	val_129
+129	val_129	val_129
+131	val_131	val_131
+133	val_133	val_133
+134	val_134	val_134
+134	val_134	val_134
+136	val_136	val_136
+137	val_137	val_137
+137	val_137	val_137
+138	val_138	val_138
+138	val_138	val_138
+138	val_138	val_138
+138	val_138	val_138
+143	val_143	val_143
+145	val_145	val_145
+146	val_146	val_146
+146	val_146	val_146
+149	val_149	val_149
+149	val_149	val_149
+15	val_15	val_15
+15	val_15	val_15
+150	val_150	val_150
+152	val_152	val_152
+152	val_152	val_152
+153	val_153	val_153
+155	val_155	val_155
+156	val_156	val_156
+157	val_157	val_157
+158	val_158	val_158
+160	val_160	val_160
+162	val_162	val_162
+163	val_163	val_163
+164	val_164	val_164
+164	val_164	val_164
+165	val_165	val_165
+165	val_165	val_165
+166	val_166	val_166
+167	val_167	val_167
+167	val_167	val_167
+167	val_167	val_167
+168	val_168	val_168
+169	val_169	val_169
+169	val_169	val_169
+169	val_169	val_169
+169	val_169	val_169
+17	val_17	val_17
+170	val_170	val_170
+172	val_172	val_172
+172	val_172	val_172
+174	val_174	val_174
+174	val_174	val_174
+175	val_175	val_175
+175	val_175	val_175
+176	val_176	val_176
+176	val_176	val_176
+177	val_177	val_177
+178	val_178	val_178
+179	val_179	val_179
+179	val_179	val_179
+18	val_18	val_18
+18	val_18	val_18
+180	val_180	val_180
+181	val_181	val_181
+183	val_183	val_183
+186	val_186	val_186
+187	val_187	val_187
+187	val_187	val_187
+187	val_187	val_187
+189	val_189	val_189
+19	val_19	val_19
+190	val_190	val_190
+191	val_191	val_191
+191	val_191	val_191
+192	val_192	val_192
+193	val_193	val_193
+193	val_193	val_193
+193	val_193	val_193
+194	val_194	val_194
+195	val_195	val_195
+195	val_195	val_195
+196	val_196	val_196
+197	val_197	val_197
+197	val_197	val_197
+199	val_199	val_199
+199	val_199	val_199
+199	val_199	val_199
+2	val_2	val_2
+20	val_20	val_20
+200	val_200	val_200
+200	val_200	val_200
+201	val_201	val_201
+202	val_202	val_202
+203	val_203	val_203
+203	val_203	val_203
+205	val_205	val_205
+205	val_205	val_205
+207	val_207	val_207
+207	val_207	val_207
+208	val_208	val_208
+208	val_208	val_208
+208	val_208	val_208
+209	val_209	val_209
+209	val_209	val_209
+213	val_213	val_213
+213	val_213	val_213
+214	val_214	val_214
+216	val_216	val_216
+216	val_216	val_216
+217	val_217	val_217
+217	val_217	val_217
+218	val_218	val_218
+219	val_219	val_219
+219	val_219	val_219
+221	val_221	val_221
+221	val_221	val_221
+222	val_222	val_222
+223	val_223	val_223
+223	val_223	val_223
+224	val_224	val_224
+224	val_224	val_224
+226	val_226	val_226
+228	val_228	val_228
+229	val_229	val_229
+229	val_229	val_229
+230	val_230	val_230
+230	val_230	val_230
+230	val_230	val_230
+230	val_230	val_230
+230	val_230	val_230
+233	val_233	val_233
+233	val_233	val_233
+235	val_235	val_235
+237	val_237	val_237
+237	val_237	val_237
+238	val_238	val_238
+238	val_238	val_238
+239	val_239	val_239
+239	val_239	val_239
+24	val_24	val_24
+24	val_24	val_24
+241	val_241	val_241
+242	val_242	val_242
+242	val_242	val_242
+244	val_244	val_244
+247	val_247	val_247
+248	val_248	val_248
+249	val_249	val_249
+252	val_252	val_252
+255	val_255	val_255
+255	val_255	val_255
+256	val_256	val_256
+256	val_256	val_256
+257	val_257	val_257
+258	val_258	val_258
+26	val_26	val_26
+26	val_26	val_26
+260	val_260	val_260
+262	val_262	val_262
+263	val_263	val_263
+265	val_265	val_265
+265	val_265	val_265
+266	val_266	val_266
+27	val_27	val_27
+272	val_272	val_272
+272	val_272	val_272
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+274	val_274	val_274
+275	val_275	val_275
+277	val_277	val_277
+277	val_277	val_277
+277	val_277	val_277
+277	val_277	val_277
+278	val_278	val_278
+278	val_278	val_278
+28	val_28	val_28
+280	val_280	val_280
+280	val_280	val_280
+281	val_281	val_281
+281	val_281	val_281
+282	val_282	val_282
+282	val_282	val_282
+283	val_283	val_283
+284	val_284	val_284
+285	val_285	val_285
+286	val_286	val_286
+287	val_287	val_287
+288	val_288	val_288
+288	val_288	val_288
+289	val_289	val_289
+291	val_291	val_291
+292	val_292	val_292
+296	val_296	val_296
+298	val_298	val_298
+298	val_298	val_298
+298	val_298	val_298
+30	val_30	val_30
+302	val_302	val_302
+305	val_305	val_305
+306	val_306	val_306
+307	val_307	val_307
+307	val_307	val_307
+308	val_308	val_308
+309	val_309	val_309
+309	val_309	val_309
+310	val_310	val_310
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+315	val_315	val_315
+316	val_316	val_316
+316	val_316	val_316
+316	val_316	val_316
+317	val_317	val_317
+317	val_317	val_317
+318	val_318	val_318
+318	val_318	val_318
+318	val_318	val_318
+321	val_321	val_321
+321	val_321	val_321
+322	val_322	val_322
+322	val_322	val_322
+323	val_323	val_323
+325	val_325	val_325
+325	val_325	val_325
+327	val_327	val_327
+327	val_327	val_327
+327	val_327	val_327
+33	val_33	val_33
+331	val_331	val_331
+331	val_331	val_331
+332	val_332	val_332
+333	val_333	val_333
+333	val_333	val_333
+335	val_335	val_335
+336	val_336	val_336
+338	val_338	val_338
+339	val_339	val_339
+34	val_34	val_34
+341	val_341	val_341
+342	val_342	val_342
+342	val_342	val_342
+344	val_344	val_344
+344	val_344	val_344
+345	val_345	val_345
+348	val_348	val_348
+348	val_348	val_348
+348	val_348	val_348
+348	val_348	val_348
+348	val_348	val_348
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+351	val_351	val_351
+353	val_353	val_353
+353	val_353	val_353
+356	val_356	val_356
+360	val_360	val_360
+362	val_362	val_362
+364	val_364	val_364
+365	val_365	val_365
+366	val_366	val_366
+367	val_367	val_367
+367	val_367	val_367
+368	val_368	val_368
+369	val_369	val_369
+369	val_369	val_369
+369	val_369	val_369
+37	val_37	val_37
+37	val_37	val_37
+373	val_373	val_373
+374	val_374	val_374
+375	val_375	val_375
+377	val_377	val_377
+378	val_378	val_378
+379	val_379	val_379
+382	val_382	val_382
+382	val_382	val_382
+384	val_384	val_384
+384	val_384	val_384
+384	val_384	val_384
+386	val_386	val_386
+389	val_389	val_389
+392	val_392	val_392
+393	val_393	val_393
+394	val_394	val_394
+395	val_395	val_395
+395	val_395	val_395
+396	val_396	val_396
+396	val_396	val_396
+396	val_396	val_396
+397	val_397	val_397
+397	val_397	val_397
+399	val_399	val_399
+399	val_399	val_399
+4	val_4	val_4
+400	val_400	val_400
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+402	val_402	val_402
+403	val_403	val_403
+403	val_403	val_403
+403	val_403	val_403
+404	val_404	val_404
+404	val_404	val_404
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+407	val_407	val_407
+409	val_409	val_409
+409	val_409	val_409
+409	val_409	val_409
+41	val_41	val_41
+411	val_411	val_411
+413	val_413	val_413
+413	val_413	val_413
+414	val_414	val_414
+414	val_414	val_414
+417	val_417	val_417
+417	val_417	val_417
+417	val_417	val_417
+418	val_418	val_418
+419	val_419	val_419
+42	val_42	val_42
+42	val_42	val_42
+421	val_421	val_421
+424	val_424	val_424
+424	val_424	val_424
+427	val_427	val_427
+429	val_429	val_429
+429	val_429	val_429
+43	val_43	val_43
+430	val_430	val_430
+430	val_430	val_430
+430	val_430	val_430
+431	val_431	val_431
+431	val_431	val_431
+431	val_431	val_431
+432	val_432	val_432
+435	val_435	val_435
+436	val_436	val_436
+437	val_437	val_437
+438	val_438	val_438
+438	val_438	val_438
+438	val_438	val_438
+439	val_439	val_439
+439	val_439	val_439
+44	val_44	val_44
+443	val_443	val_443
+444	val_444	val_444
+446	val_446	val_446
+448	val_448	val_448
+449	val_449	val_449
+452	val_452	val_452
+453	val_453	val_453
+454	val_454	val_454
+454	val_454	val_454
+454	val_454	val_454
+455	val_455	val_455
+457	val_457	val_457
+458	val_458	val_458
+458	val_458	val_458
+459	val_459	val_459
+459	val_459	val_459
+460	val_460	val_460
+462	val_462	val_462
+462	val_462	val_462
+463	val_463	val_463
+463	val_463	val_463
+466	val_466	val_466
+466	val_466	val_466
+466	val_466	val_466
+467	val_467	val_467
+468	val_468	val_468
+468	val_468	val_468
+468	val_468	val_468
+468	val_468	val_468
+469	val_469	val_469
+469	val_469	val_469
+469	val_469	val_469
+469	val_469	val_469
+469	val_469	val_469
+47	val_47	val_47
+470	val_470	val_470
+472	val_472	val_472
+475	val_475	val_475
+477	val_477	val_477
+478	val_478	val_478
+478	val_478	val_478
+479	val_479	val_479
+480	val_480	val_480
+480	val_480	val_480
+480	val_480	val_480
+481	val_481	val_481
+482	val_482	val_482
+483	val_483	val_483
+484	val_484	val_484
+485	val_485	val_485
+487	val_487	val_487
+489	val_489	val_489
+489	val_489	val_489
+489	val_489	val_489
+489	val_489	val_489
+490	val_490	val_490
+491	val_491	val_491
+492	val_492	val_492
+492	val_492	val_492
+493	val_493	val_493
+494	val_494	val_494
+495	val_495	val_495
+496	val_496	val_496
+497	val_497	val_497
+498	val_498	val_498
+498	val_498	val_498
+498	val_498	val_498
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+51	val_51	val_51
+51	val_51	val_51
+53	val_53	val_53
+54	val_54	val_54
+57	val_57	val_57
+58	val_58	val_58
+58	val_58	val_58
+64	val_64	val_64
+65	val_65	val_65
+66	val_66	val_66
+67	val_67	val_67
+67	val_67	val_67
+69	val_69	val_69
+70	val_70	val_70
+70	val_70	val_70
+70	val_70	val_70
+72	val_72	val_72
+72	val_72	val_72
+74	val_74	val_74
+76	val_76	val_76
+76	val_76	val_76
+77	val_77	val_77
+78	val_78	val_78
+8	val_8	val_8
+80	val_80	val_80
+82	val_82	val_82
+83	val_83	val_83
+83	val_83	val_83
+84	val_84	val_84
+84	val_84	val_84
+85	val_85	val_85
+86	val_86	val_86
+87	val_87	val_87
+9	val_9	val_9
+90	val_90	val_90
+90	val_90	val_90
+90	val_90	val_90
+92	val_92	val_92
+95	val_95	val_95
+95	val_95	val_95
+96	val_96	val_96
+97	val_97	val_97
+97	val_97	val_97
+98	val_98	val_98
+98	val_98	val_98
+not	null	constraint
+PREHOOK: query: explain INSERT OVERWRITE TABLE table1 select src.*, value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain INSERT OVERWRITE TABLE table1 select src.*, value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (enforce_constraint(key is not null) and enforce_constraint(value is not null)) (type: boolean)
+                    Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.table1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                        outputColumnNames: a, b, c
+                        Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: INSERT OVERWRITE TABLE table1 select src.*, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@table1
+POSTHOOK: query: INSERT OVERWRITE TABLE table1 select src.*, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@table1
+POSTHOOK: Lineage: table1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: table1.c SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM table1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@table1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM table1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@table1
+#### A masked pattern was here ####
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+10	val_10	val_10
+100	val_100	val_100
+100	val_100	val_100
+103	val_103	val_103
+103	val_103	val_103
+104	val_104	val_104
+104	val_104	val_104
+105	val_105	val_105
+11	val_11	val_11
+111	val_111	val_111
+113	val_113	val_113
+113	val_113	val_113
+114	val_114	val_114
+116	val_116	val_116
+118	val_118	val_118
+118	val_118	val_118
+119	val_119	val_119
+119	val_119	val_119
+119	val_119	val_119
+12	val_12	val_12
+12	val_12	val_12
+120	val_120	val_120
+120	val_120	val_120
+125	val_125	val_125
+125	val_125	val_125
+126	val_126	val_126
+128	val_128	val_128
+128	val_128	val_128
+128	val_128	val_128
+129	val_129	val_129
+129	val_129	val_129
+131	val_131	val_131
+133	val_133	val_133
+134	val_134	val_134
+134	val_134	val_134
+136	val_136	val_136
+137	val_137	val_137
+137	val_137	val_137
+138	val_138	val_138
+138	val_138	val_138
+138	val_138	val_138
+138	val_138	val_138
+143	val_143	val_143
+145	val_145	val_145
+146	val_146	val_146
+146	val_146	val_146
+149	val_149	val_149
+149	val_149	val_149
+15	val_15	val_15
+15	val_15	val_15
+150	val_150	val_150
+152	val_152	val_152
+152	val_152	val_152
+153	val_153	val_153
+155	val_155	val_155
+156	val_156	val_156
+157	val_157	val_157
+158	val_158	val_158
+160	val_160	val_160
+162	val_162	val_162
+163	val_163	val_163
+164	val_164	val_164
+164	val_164	val_164
+165	val_165	val_165
+165	val_165	val_165
+166	val_166	val_166
+167	val_167	val_167
+167	val_167	val_167
+167	val_167	val_167
+168	val_168	val_168
+169	val_169	val_169
+169	val_169	val_169
+169	val_169	val_169
+169	val_169	val_169
+17	val_17	val_17
+170	val_170	val_170
+172	val_172	val_172
+172	val_172	val_172
+174	val_174	val_174
+174	val_174	val_174
+175	val_175	val_175
+175	val_175	val_175
+176	val_176	val_176
+176	val_176	val_176
+177	val_177	val_177
+178	val_178	val_178
+179	val_179	val_179
+179	val_179	val_179
+18	val_18	val_18
+18	val_18	val_18
+180	val_180	val_180
+181	val_181	val_181
+183	val_183	val_183
+186	val_186	val_186
+187	val_187	val_187
+187	val_187	val_187
+187	val_187	val_187
+189	val_189	val_189
+19	val_19	val_19
+190	val_190	val_190
+191	val_191	val_191
+191	val_191	val_191
+192	val_192	val_192
+193	val_193	val_193
+193	val_193	val_193
+193	val_193	val_193
+194	val_194	val_194
+195	val_195	val_195
+195	val_195	val_195
+196	val_196	val_196
+197	val_197	val_197
+197	val_197	val_197
+199	val_199	val_199
+199	val_199	val_199
+199	val_199	val_199
+2	val_2	val_2
+20	val_20	val_20
+200	val_200	val_200
+200	val_200	val_200
+201	val_201	val_201
+202	val_202	val_202
+203	val_203	val_203
+203	val_203	val_203
+205	val_205	val_205
+205	val_205	val_205
+207	val_207	val_207
+207	val_207	val_207
+208	val_208	val_208
+208	val_208	val_208
+208	val_208	val_208
+209	val_209	val_209
+209	val_209	val_209
+213	val_213	val_213
+213	val_213	val_213
+214	val_214	val_214
+216	val_216	val_216
+216	val_216	val_216
+217	val_217	val_217
+217	val_217	val_217
+218	val_218	val_218
+219	val_219	val_219
+219	val_219	val_219
+221	val_221	val_221
+221	val_221	val_221
+222	val_222	val_222
+223	val_223	val_223
+223	val_223	val_223
+224	val_224	val_224
+224	val_224	val_224
+226	val_226	val_226
+228	val_228	val_228
+229	val_229	val_229
+229	val_229	val_229
+230	val_230	val_230
+230	val_230	val_230
+230	val_230	val_230
+230	val_230	val_230
+230	val_230	val_230
+233	val_233	val_233
+233	val_233	val_233
+235	val_235	val_235
+237	val_237	val_237
+237	val_237	val_237
+238	val_238	val_238
+238	val_238	val_238
+239	val_239	val_239
+239	val_239	val_239
+24	val_24	val_24
+24	val_24	val_24
+241	val_241	val_241
+242	val_242	val_242
+242	val_242	val_242
+244	val_244	val_244
+247	val_247	val_247
+248	val_248	val_248
+249	val_249	val_249
+252	val_252	val_252
+255	val_255	val_255
+255	val_255	val_255
+256	val_256	val_256
+256	val_256	val_256
+257	val_257	val_257
+258	val_258	val_258
+26	val_26	val_26
+26	val_26	val_26
+260	val_260	val_260
+262	val_262	val_262
+263	val_263	val_263
+265	val_265	val_265
+265	val_265	val_265
+266	val_266	val_266
+27	val_27	val_27
+272	val_272	val_272
+272	val_272	val_272
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+274	val_274	val_274
+275	val_275	val_275
+277	val_277	val_277
+277	val_277	val_277
+277	val_277	val_277
+277	val_277	val_277
+278	val_278	val_278
+278	val_278	val_278
+28	val_28	val_28
+280	val_280	val_280
+280	val_280	val_280
+281	val_281	val_281
+281	val_281	val_281
+282	val_282	val_282
+282	val_282	val_282
+283	val_283	val_283
+284	val_284	val_284
+285	val_285	val_285
+286	val_286	val_286
+287	val_287	val_287
+288	val_288	val_288
+288	val_288	val_288
+289	val_289	val_289
+291	val_291	val_291
+292	val_292	val_292
+296	val_296	val_296
+298	val_298	val_298
+298	val_298	val_298
+298	val_298	val_298
+30	val_30	val_30
+302	val_302	val_302
+305	val_305	val_305
+306	val_306	val_306
+307	val_307	val_307
+307	val_307	val_307
+308	val_308	val_308
+309	val_309	val_309
+309	val_309	val_309
+310	val_310	val_310
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+315	val_315	val_315
+316	val_316	val_316
+316	val_316	val_316
+316	val_316	val_316
+317	val_317	val_317
+317	val_317	val_317
+318	val_318	val_318
+318	val_318	val_318
+318	val_318	val_318
+321	val_321	val_321
+321	val_321	val_321
+322	val_322	val_322
+322	val_322	val_322
+323	val_323	val_323
+325	val_325	val_325
+325	val_325	val_325
+327	val_327	val_327
+327	val_327	val_327
+327	val_327	val_327
+33	val_33	val_33
+331	val_331	val_331
+331	val_331	val_331
+332	val_332	val_332
+333	val_333	val_333
+333	val_333	val_333
+335	val_335	val_335
+336	val_336	val_336
+338	val_338	val_338
+339	val_339	val_339
+34	val_34	val_34
+341	val_341	val_341
+342	val_342	val_342
+342	val_342	val_342
+344	val_344	val_344
+344	val_344	val_344
+345	val_345	val_345
+348	val_348	val_348
+348	val_348	val_348
+348	val_348	val_348
+348	val_348	val_348
+348	val_348	val_348
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+351	val_351	val_351
+353	val_353	val_353
+353	val_353	val_353
+356	val_356	val_356
+360	val_360	val_360
+362	val_362	val_362
+364	val_364	val_364
+365	val_365	val_365
+366	val_366	val_366
+367	val_367	val_367
+367	val_367	val_367
+368	val_368	val_368
+369	val_369	val_369
+369	val_369	val_369
+369	val_369	val_369
+37	val_37	val_37
+37	val_37	val_37
+373	val_373	val_373
+374	val_374	val_374
+375	val_375	val_375
+377	val_377	val_377
+378	val_378	val_378
+379	val_379	val_379
+382	val_382	val_382
+382	val_382	val_382
+384	val_384	val_384
+384	val_384	val_384
+384	val_384	val_384
+386	val_386	val_386
+389	val_389	val_389
+392	val_392	val_392
+393	val_393	val_393
+394	val_394	val_394
+395	val_395	val_395
+395	val_395	val_395
+396	val_396	val_396
+396	val_396	val_396
+396	val_396	val_396
+397	val_397	val_397
+397	val_397	val_397
+399	val_399	val_399
+399	val_399	val_399
+4	val_4	val_4
+400	val_400	val_400
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+402	val_402	val_402
+403	val_403	val_403
+403	val_403	val_403
+403	val_403	val_403
+404	val_404	val_404
+404	val_404	val_404
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+407	val_407	val_407
+409	val_409	val_409
+409	val_409	val_409
+409	val_409	val_409
+41	val_41	val_41
+411	val_411	val_411
+413	val_413	val_413
+413	val_413	val_413
+414	val_414	val_414
+414	val_414	val_414
+417	val_417	val_417
+417	val_417	val_417
+417	val_417	val_417
+418	val_418	val_418
+419	val_419	val_419
+42	val_42	val_42
+42	val_42	val_42
+421	val_421	val_421
+424	val_424	val_424
+424	val_424	val_424
+427	val_427	val_427
+429	val_429	val_429
+429	val_429	val_429
+43	val_43	val_43
+430	val_430	val_430
+430	val_430	val_430
+430	val_430	val_430
+431	val_431	val_431
+431	val_431	val_431
+431	val_431	val_431
+432	val_432	val_432
+435	val_435	val_435
+436	val_436	val_436
+437	val_437	val_437
+438	val_438	val_438
+438	val_438	val_438
+438	val_438	val_438
+439	val_439	val_439
+439	val_439	val_439
+44	val_44	val_44
+443	val_443	val_443
+444	val_444	val_444
+446	val_446	val_446
+448	val_448	val_448
+449	val_449	val_449
+452	val_452	val_452
+453	val_453	val_453
+454	val_454	val_454
+454	val_454	val_454
+454	val_454	val_454
+455	val_455	val_455
+457	val_457	val_457
+458	val_458	val_458
+458	val_458	val_458
+459	val_459	val_459
+459	val_459	val_459
+460	val_460	val_460
+462	val_462	val_462
+462	val_462	val_462
+463	val_463	val_463
+463	val_463	val_463
+466	val_466	val_466
+466	val_466	val_466
+466	val_466	val_466
+467	val_467	val_467
+468	val_468	val_468
+468	val_468	val_468
+468	val_468	val_468
+468	val_468	val_468
+469	val_469	val_469
+469	val_469	val_469
+469	val_469	val_469
+469	val_469	val_469
+469	val_469	val_469
+47	val_47	val_47
+470	val_470	val_470
+472	val_472	val_472
+475	val_475	val_475
+477	val_477	val_477
+478	val_478	val_478
+478	val_478	val_478
+479	val_479	val_479
+480	val_480	val_480
+480	val_480	val_480
+480	val_480	val_480
+481	val_481	val_481
+482	val_482	val_482
+483	val_483	val_483
+484	val_484	val_484
+485	val_485	val_485
+487	val_487	val_487
+489	val_489	val_489
+489	val_489	val_489
+489	val_489	val_489
+489	val_489	val_489
+490	val_490	val_490
+491	val_491	val_491
+492	val_492	val_492
+492	val_492	val_492
+493	val_493	val_493
+494	val_494	val_494
+495	val_495	val_495
+496	val_496	val_496
+497	val_497	val_497
+498	val_498	val_498
+498	val_498	val_498
+498	val_498	val_498
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+51	val_51	val_51
+51	val_51	val_51
+53	val_53	val_53
+54	val_54	val_54
+57	val_57	val_57
+58	val_58	val_58
+58	val_58	val_58
+64	val_64	val_64
+65	val_65	val_65
+66	val_66	val_66
+67	val_67	val_67
+67	val_67	val_67
+69	val_69	val_69
+70	val_70	val_70
+70	val_70	val_70
+70	val_70	val_70
+72	val_72	val_72
+72	val_72	val_72
+74	val_74	val_74
+76	val_76	val_76
+76	val_76	val_76
+77	val_77	val_77
+78	val_78	val_78
+8	val_8	val_8
+80	val_80	val_80
+82	val_82	val_82
+83	val_83	val_83
+83	val_83	val_83
+84	val_84	val_84
+84	val_84	val_84
+85	val_85	val_85
+86	val_86	val_86
+87	val_87	val_87
+9	val_9	val_9
+90	val_90	val_90
+90	val_90	val_90
+90	val_90	val_90
+92	val_92	val_92
+95	val_95	val_95
+95	val_95	val_95
+96	val_96	val_96
+97	val_97	val_97
+97	val_97	val_97
+98	val_98	val_98
+98	val_98	val_98
+PREHOOK: query: explain INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (enforce_constraint(key is not null) and enforce_constraint(value is not null)) (type: boolean)
+                    Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 125 Data size: 33125 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 125 Data size: 33125 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.table1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                        outputColumnNames: a, b, c
+                        Statistics: Num rows: 125 Data size: 33125 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@table1
+POSTHOOK: query: INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@table1
+POSTHOOK: Lineage: table1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table1.b SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table1.c SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM table1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@table1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM table1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@table1
+#### A masked pattern was here ####
+0	0	val_0
+0	0	val_0
+0	0	val_0
+10	10	val_10
+100	100	val_100
+100	100	val_100
+103	103	val_103
+103	103	val_103
+104	104	val_104
+104	104	val_104
+105	105	val_105
+11	11	val_11
+111	111	val_111
+113	113	val_113
+113	113	val_113
+114	114	val_114
+116	116	val_116
+118	118	val_118
+118	118	val_118
+119	119	val_119
+119	119	val_119
+119	119	val_119
+12	12	val_12
+12	12	val_12
+120	120	val_120
+120	120	val_120
+125	125	val_125
+125	125	val_125
+126	126	val_126
+128	128	val_128
+128	128	val_128
+128	128	val_128
+129	129	val_129
+129	129	val_129
+131	131	val_131
+133	133	val_133
+134	134	val_134
+134	134	val_134
+136	136	val_136
+137	137	val_137
+137	137	val_137
+138	138	val_138
+138	138	val_138
+138	138	val_138
+138	138	val_138
+143	143	val_143
+145	145	val_145
+146	146	val_146
+146	146	val_146
+149	149	val_149
+149	149	val_149
+15	15	val_15
+15	15	val_15
+150	150	val_150
+152	152	val_152
+152	152	val_152
+153	153	val_153
+155	155	val_155
+156	156	val_156
+157	157	val_157
+158	158	val_158
+160	160	val_160
+162	162	val_162
+163	163	val_163
+164	164	val_164
+164	164	val_164
+165	165	val_165
+165	165	val_165
+166	166	val_166
+167	167	val_167
+167	167	val_167
+167	167	val_167
+168	168	val_168
+169	169	val_169
+169	169	val_169
+169	169	val_169
+169	169	val_169
+17	17	val_17
+170	170	val_170
+172	172	val_172
+172	172	val_172
+174	174	val_174
+174	174	val_174
+175	175	val_175
+175	175	val_175
+176	176	val_176
+176	176	val_176
+177	177	val_177
+178	178	val_178
+179	179	val_179
+179	179	val_179
+18	18	val_18
+18	18	val_18
+180	180	val_180
+181	181	val_181
+183	183	val_183
+186	186	val_186
+187	187	val_187
+187	187	val_187
+187	187	val_187
+189	189	val_189
+19	19	val_19
+190	190	val_190
+191	191	val_191
+191	191	val_191
+192	192	val_192
+193	193	val_193
+193	193	val_193
+193	193	val_193
+194	194	val_194
+195	195	val_195
+195	195	val_195
+196	196	val_196
+197	197	val_197
+197	197	val_197
+199	199	val_199
+199	199	val_199
+199	199	val_199
+2	2	val_2
+20	20	val_20
+200	200	val_200
+200	200	val_200
+201	201	val_201
+202	202	val_202
+203	203	val_203
+203	203	val_203
+205	205	val_205
+205	205	val_205
+207	207	val_207
+207	207	val_207
+208	208	val_208
+208	208	val_208
+208	208	val_208
+209	209	val_209
+209	209	val_209
+213	213	val_213
+213	213	val_213
+214	214	val_214
+216	216	val_216
+216	216	val_216
+217	217	val_217
+217	217	val_217
+218	218	val_218
+219	219	val_219
+219	219	val_219
+221	221	val_221
+221	221	val_221
+222	222	val_222
+223	223	val_223
+223	223	val_223
+224	224	val_224
+224	224	val_224
+226	226	val_226
+228	228	val_228
+229	229	val_229
+229	229	val_229
+230	230	val_230
+230	230	val_230
+230	230	val_230
+230	230	val_230
+230	230	val_230
+233	233	val_233
+233	233	val_233
+235	235	val_235
+237	237	val_237
+237	237	val_237
+238	238	val_238
+238	238	val_238
+239	239	val_239
+239	239	val_239
+24	24	val_24
+24	24	val_24
+241	241	val_241
+242	242	val_242
+242	242	val_242
+244	244	val_244
+247	247	val_247
+248	248	val_248
+249	249	val_249
+252	252	val_252
+255	255	val_255
+255	255	val_255
+256	256	val_256
+256	256	val_256
+257	257	val_257
+258	258	val_258
+26	26	val_26
+26	26	val_26
+260	260	val_260
+262	262	val_262
+263	263	val_263
+265	265	val_265
+265	265	val_265
+266	266	val_266
+27	27	val_27
+272	272	val_272
+272	272	val_272
+273	273	val_273
+273	273	val_273
+273	273	val_273
+274	274	val_274
+275	275	val_275
+277	277	val_277
+277	277	val_277
+277	277	val_277
+277	277	val_277
+278	278	val_278
+278	278	val_278
+28	28	val_28
+280	280	val_280
+280	280	val_280
+281	281	val_281
+281	281	val_281
+282	282	val_282
+282	282	val_282
+283	283	val_283
+284	284	val_284
+285	285	val_285
+286	286	val_286
+287	287	val_287
+288	288	val_288
+288	288	val_288
+289	289	val_289
+291	291	val_291
+292	292	val_292
+296	296	val_296
+298	298	val_298
+298	298	val_298
+298	298	val_298
+30	30	val_30
+302	302	val_302
+305	305	val_305
+306	306	val_306
+307	307	val_307
+307	307	val_307
+308	308	val_308
+309	309	val_309
+309	309	val_309
+310	310	val_310
+311	311	val_311
+311	311	val_311
+311	311	val_311
+315	315	val_315
+316	316	val_316
+316	316	val_316
+316	316	val_316
+317	317	val_317
+317	317	val_317
+318	318	val_318
+318	318	val_318
+318	318	val_318
+321	321	val_321
+321	321	val_321
+322	322	val_322
+322	322	val_322
+323	323	val_323
+325	325	val_325
+325	325	val_325
+327	327	val_327
+327	327	val_327
+327	327	val_327
+33	33	val_33
+331	331	val_331
+331	331	val_331
+332	332	val_332
+333	333	val_333
+333	333	val_333
+335	335	val_335
+336	336	val_336
+338	338	val_338
+339	339	val_339
+34	34	val_34
+341	341	val_341
+342	342	val_342
+342	342	val_342
+344	344	val_344
+344	344	val_344
+345	345	val_345
+348	348	val_348
+348	348	val_348
+348	348	val_348
+348	348	val_348
+348	348	val_348
+35	35	val_35
+35	35	val_35
+35	35	val_35
+351	351	val_351
+353	353	val_353
+353	353	val_353
+356	356	val_356
+360	360	val_360
+362	362	val_362
+364	364	val_364
+365	365	val_365
+366	366	val_366
+367	367	val_367
+367	367	val_367
+368	368	val_368
+369	369	val_369
+369	369	val_369
+369	369	val_369
+37	37	val_37
+37	37	val_37
+373	373	val_373
+374	374	val_374
+375	375	val_375
+377	377	val_377
+378	378	val_378
+379	379	val_379
+382	382	val_382
+382	382	val_382
+384	384	val_384
+384	384	val_384
+384	384	val_384
+386	386	val_386
+389	389	val_389
+392	392	val_392
+393	393	val_393
+394	394	val_394
+395	395	val_395
+395	395	val_395
+396	396	val_396
+396	396	val_396
+396	396	val_396
+397	397	val_397
+397	397	val_397
+399	399	val_399
+399	399	val_399
+4	4	val_4
+400	400	val_400
+401	401	val_401
+401	401	val_401
+401	401	val_401
+401	401	val_401
+401	401	val_401
+402	402	val_402
+403	403	val_403
+403	403	val_403
+403	403	val_403
+404	404	val_404
+404	404	val_404
+406	406	val_406
+406	406	val_406
+406	406	val_406
+406	406	val_406
+407	407	val_407
+409	409	val_409
+409	409	val_409
+409	409	val_409
+41	41	val_41
+411	411	val_411
+413	413	val_413
+413	413	val_413
+414	414	val_414
+414	414	val_414
+417	417	val_417
+417	417	val_417
+417	417	val_417
+418	418	val_418
+419	419	val_419
+42	42	val_42
+42	42	val_42
+421	421	val_421
+424	424	val_424
+424	424	val_424
+427	427	val_427
+429	429	val_429
+429	429	val_429
+43	43	val_43
+430	430	val_430
+430	430	val_430
+430	430	val_430
+431	431	val_431
+431	431	val_431
+431	431	val_431
+432	432	val_432
+435	435	val_435
+436	436	val_436
+437	437	val_437
+438	438	val_438
+438	438	val_438
+438	438	val_438
+439	439	val_439
+439	439	val_439
+44	44	val_44
+443	443	val_443
+444	444	val_444
+446	446	val_446
+448	448	val_448
+449	449	val_449
+452	452	val_452
+453	453	val_453
+454	454	val_454
+454	454	val_454
+454	454	val_454
+455	455	val_455
+457	457	val_457
+458	458	val_458
+458	458	val_458
+459	459	val_459
+459	459	val_459
+460	460	val_460
+462	462	val_462
+462	462	val_462
+463	463	val_463
+463	463	val_463
+466	466	val_466
+466	466	val_466
+466	466	val_466
+467	467	val_467
+468	468	val_468
+468	468	val_468
+468	468	val_468
+468	468	val_468
+469	469	val_469
+469	469	val_469
+469	469	val_469
+469	469	val_469
+469	469	val_469
+47	47	val_47
+470	470	val_470
+472	472	val_472
+475	475	val_475
+477	477	val_477
+478	478	val_478
+478	478	val_478
+479	479	val_479
+480	480	val_480
+480	480	val_480
+480	480	val_480
+481	481	val_481
+482	482	val_482
+483	483	val_483
+484	484	val_484
+485	485	val_485
+487	487	val_487
+489	489	val_489
+489	489	val_489
+489	489	val_489
+489	489	val_489
+490	490	val_490
+491	491	val_491
+492	492	val_492
+492	492	val_492
+493	493	val_493
+494	494	val_494
+495	495	val_495
+496	496	val_496
+497	497	val_497
+498	498	val_498
+498	498	val_498
+498	498	val_498
+5	5	val_5
+5	5	val_5
+5	5	val_5
+51	51	val_51
+51	51	val_51
+53	53	val_53
+54	54	val_54
+57	57	val_57
+58	58	val_58
+58	58	val_58
+64	64	val_64
+65	65	val_65
+66	66	val_66
+67	67	val_67
+67	67	val_67
+69	69	val_69
+70	70	val_70
+70	70	val_70
+70	70	val_70
+72	72	val_72
+72	72	val_72
+74	74	val_74
+76	76	val_76
+76	76	val_76
+77	77	val_77
+78	78	val_78
+8	8	val_8
+80	80	val_80
+82	82	val_82
+83	83	val_83
+83	83	val_83
+84	84	val_84
+84	84	val_84
+85	85	val_85
+86	86	val_86
+87	87	val_87
+9	9	val_9
+90	90	val_90
+90	90	val_90
+90	90	val_90
+92	92	val_92
+95	95	val_95
+95	95	val_95
+96	96	val_96
+97	97	val_97
+97	97	val_97
+98	98	val_98
+98	98	val_98
+PREHOOK: query: DROP TABLE table1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@table1
+PREHOOK: Output: default@table1
+POSTHOOK: query: DROP TABLE table1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@table1
+POSTHOOK: Output: default@table1
+PREHOOK: query: create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: create table src_multi2 (i STRING, j STRING NOT NULL ENABLE)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: create table src_multi2 (i STRING, j STRING NOT NULL ENABLE)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi2
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((key < 10) and enforce_constraint(key is not null)) (type: boolean)
+                    Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: a, b
+                        Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+                  Filter Operator
+                    predicate: ((key < 20) and (key > 10) and enforce_constraint(value is not null)) (type: boolean)
+                    Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: i, j
+                        Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(i, 'hll'), compute_stats(j, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b
+          Column Types: string, string
+          Table: default.src_multi1
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: i, j
+          Column Types: string, string
+          Table: default.src_multi2
+
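The two Filter Operators in the plan above are where the declared constraints surface: the branch that writes src_multi1 wraps "key is not null" in enforce_constraint() because column a was declared NOT NULL ENFORCED, and the branch that writes src_multi2 does the same for value because column j was declared NOT NULL ENABLE. As a minimal sketch of what this implies for a writer (the table name and literals below are illustrative, not taken from enforce_constraint_notnull.q), a row that produces a NULL for an enforced column is expected to fail the enforce_constraint check at runtime rather than be written:

    -- illustrative only; not part of the test file above
    CREATE TABLE t_nn (a STRING NOT NULL ENFORCED, b STRING);
    INSERT INTO t_nn VALUES ('ok', 'passes the NOT NULL filter');     -- row is written
    INSERT INTO t_nn VALUES (NULL, 'violates the constraint');        -- expected to raise a constraint violation at runtime
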
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.i SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.j SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((key < 10) and enforce_constraint(key is not null)) (type: boolean)
+                    Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: a, b
+                        Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+                  Filter Operator
+                    predicate: ((key < 20) and (key > 10) and enforce_constraint(value is not null)) (type: boolean)
+                    Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: i, j
+                        Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(i, 'hll'), compute_stats(j, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b
+          Column Types: string, string
+          Table: default.src_multi1
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: i, j
+          Column Types: string, string
+          Table: default.src_multi2
+
+PREHOOK: query: from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.i SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.j SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table acid_uami(i int,
+                 de decimal(5,2) constraint nn1 not null enforced,
+                 vc varchar(128) constraint nn2 not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_uami
+POSTHOOK: query: create table acid_uami(i int,
+                 de decimal(5,2) constraint nn1 not null enforced,
+                 vc varchar(128) constraint nn2 not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_uami
+PREHOOK: query: explain insert into table acid_uami values
+    (1, 109.23, 'mary had a little lamb'),
+    (6553, 923.19, 'its fleece was white as snow')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table acid_uami values
+    (1, 109.23, 'mary had a little lamb'),
+    (6553, 923.19, 'its fleece was white as snow')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(1,109.23,'mary had a little lamb'),const struct(6553,923.19,'its fleece was white as snow')) (type: array<struct<col1:int,col2:decimal(5,2),col3:string>>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), col2 (type: decimal(5,2)), col3 (type: string)
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Filter Operator
+                          predicate: (enforce_constraint(_col1 is not null) and enforce_constraint(_col2 is not null)) (type: boolean)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: int), _col1 (type: decimal(5,2)), _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), VALUE._col1 (type: decimal(5,2)), CAST( VALUE._col2 AS varchar(128)) (type: varchar(128))
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.acid_uami
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: decimal(5,2)), _col2 (type: varchar(128))
+                  outputColumnNames: i, de, vc
+                  Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(i, 'hll'), compute_stats(de, 'hll'), compute_stats(vc, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 1496 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 1496 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:decimal(5,2),max:decimal(5,2),countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1528 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1528 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.acid_uami
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: i, de, vc
+          Column Types: int, decimal(5,2), varchar(128)
+          Table: default.acid_uami
+
+PREHOOK: query: insert into table acid_uami values
+    (1, 109.23, 'mary had a little lamb'),
+    (6553, 923.19, 'its fleece was white as snow')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@acid_uami
+POSTHOOK: query: insert into table acid_uami values
+    (1, 109.23, 'mary had a little lamb'),
+    (6553, 923.19, 'its fleece was white as snow')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@acid_uami
+POSTHOOK: Lineage: acid_uami.de SCRIPT []
+POSTHOOK: Lineage: acid_uami.i SCRIPT []
+POSTHOOK: Lineage: acid_uami.vc SCRIPT []
+PREHOOK: query: select * from acid_uami
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_uami
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_uami
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_uami
+#### A masked pattern was here ####
+1	109.23	mary had a little lamb
+6553	923.19	its fleece was white as snow
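The acid_uami rows above show that inserts satisfying both named constraints (nn1 on de, nn2 on vc) follow the same plan shape as the earlier tables: the VALUES clause is expanded by the inline() UDTF, then a Filter Operator evaluates enforce_constraint(_col1 is not null) and enforce_constraint(_col2 is not null) before the ORC writer. A minimal sketch of the named-constraint form on a transactional table, with table name and values chosen only for illustration (not part of this q.out):

    -- illustrative only; mirrors the acid_uami DDL shape
    CREATE TABLE t_acid (i INT,
                         de DECIMAL(5,2) CONSTRAINT nn_de NOT NULL ENFORCED)
      CLUSTERED BY (i) INTO 2 BUCKETS STORED AS ORC
      TBLPROPERTIES ('transactional'='true');
    INSERT INTO t_acid VALUES (1, 10.00);                        -- passes the enforce_constraint filter
    INSERT INTO t_acid VALUES (2, CAST(NULL AS DECIMAL(5,2)));   -- expected to be rejected at runtime
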
+PREHOOK: query: --insert into select
+explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: --insert into select
+explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: UDFToInteger(key) (type: int), CAST( key AS decimal(5,2)) (type: decimal(5,2)), value (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 103500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (enforce_constraint(_col1 is not null) and enforce_constraint(_col2 is not null)) (type: boolean)
+                      Statistics: Num rows: 125 Data size: 25875 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 125 Data size: 25875 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: int), _col1 (type: decimal(5,2)), _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), VALUE._col1 (type: decimal(5,2)), CAST( VALUE._col2 AS varchar(128)) (type: varchar(128))
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 125 Data size: 41000 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 125 Data size: 41000 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      o

<TRUNCATED>

[4/5] hive git commit: HIVE-16605: Enforce NOT NULL constraint (Vineet Garg, reviewed by Ashutosh Chauhan)

Posted by vg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index b67a03f..9ccb4e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -52,6 +52,7 @@ import org.antlr.runtime.tree.TreeVisitorAction;
 import org.antlr.runtime.tree.TreeWizard;
 import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -127,6 +128,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient;
 import org.apache.hadoop.hive.ql.metadata.Table;
@@ -210,6 +212,7 @@ import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.ResourceType;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCardinalityViolation;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
@@ -574,7 +577,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       HashMap<String, ASTNode> wExprsInDest = qb.getParseInfo().getWindowingExprsForClause(dest);
       int wColIdx = spec.getWindowExpressions() == null ? 0 : spec.getWindowExpressions().size();
       WindowFunctionSpec wFnSpec = processWindowFunction(wdwFn,
-        (ASTNode)wdwFn.getChild(wdwFn.getChildCount()-1));
+          (ASTNode)wdwFn.getChild(wdwFn.getChildCount()-1));
       // If this is a duplicate invocation of a function; don't add to WindowingSpec.
       if ( wExprsInDest != null &&
           wExprsInDest.containsKey(wFnSpec.getExpression().toStringTree())) {
@@ -611,8 +614,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * @throws SemanticException
    */
   private void doPhase1GetAllAggregations(ASTNode expressionTree,
-      HashMap<String, ASTNode> aggregations, List<ASTNode> wdwFns,
-      ASTNode wndParent) throws SemanticException {
+                                          HashMap<String, ASTNode> aggregations, List<ASTNode> wdwFns,
+                                          ASTNode wndParent) throws SemanticException {
     int exprTokenType = expressionTree.getToken().getType();
     if(exprTokenType == HiveParser.TOK_SUBQUERY_EXPR) {
       //since now we have scalar subqueries we can get subquery expression in having
@@ -792,9 +795,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             ErrorMsg.SAMPLE_RESTRICTION.getMsg()));
       }
       TableSample tabSample = new TableSample(
-              unescapeIdentifier(sampleClause.getChild(0).getText()),
-              unescapeIdentifier(sampleClause.getChild(1).getText()),
-              sampleCols);
+          unescapeIdentifier(sampleClause.getChild(0).getText()),
+          unescapeIdentifier(sampleClause.getChild(1).getText()),
+          sampleCols);
       qb.getParseInfo().setTabSample(alias, tabSample);
       if (unparseTranslator.isEnabled()) {
         for (ASTNode sampleCol : sampleCols) {
@@ -876,8 +879,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   private void assertCombineInputFormat(Tree numerator, String message) throws SemanticException {
     String inputFormat = conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ?
-      HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT):
-      HiveConf.getVar(conf, HiveConf.ConfVars.HIVEINPUTFORMAT);
+        HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT):
+        HiveConf.getVar(conf, HiveConf.ConfVars.HIVEINPUTFORMAT);
     if (!inputFormat.equals(CombineHiveInputFormat.class.getName())) {
       throw new SemanticException(generateErrorMessage((ASTNode) numerator,
           message + " sampling is not supported in " + inputFormat));
@@ -1164,8 +1167,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   static private boolean isOuterJoinToken(ASTNode node) {
     return (node.getToken().getType() == HiveParser.TOK_LEFTOUTERJOIN)
-      || (node.getToken().getType() == HiveParser.TOK_RIGHTOUTERJOIN)
-      || (node.getToken().getType() == HiveParser.TOK_FULLOUTERJOIN);
+        || (node.getToken().getType() == HiveParser.TOK_RIGHTOUTERJOIN)
+        || (node.getToken().getType() == HiveParser.TOK_FULLOUTERJOIN);
   }
 
   /**
@@ -1197,7 +1200,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         processPTF(qb, child);
         PTFInvocationSpec ptfInvocationSpec = qb.getPTFInvocationSpec(child);
         String inputAlias = ptfInvocationSpec == null ? null :
-          ptfInvocationSpec.getFunction().getAlias();;
+            ptfInvocationSpec.getFunction().getAlias();;
         if ( inputAlias == null ) {
           throw new SemanticException(generateErrorMessage(child,
               "PTF invocation in a Join must have an alias"));
@@ -1328,7 +1331,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         doPhase1GetColumnAliasesFromSelect(ast, qbp);
         qbp.setAggregationExprsForClause(ctx_1.dest, aggregations);
         qbp.setDistinctFuncExprsForClause(ctx_1.dest,
-          doPhase1GetDistinctFuncExprs(aggregations));
+            doPhase1GetDistinctFuncExprs(aggregations));
         break;
 
       case HiveParser.TOK_WHERE:
@@ -1449,7 +1452,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         break;
 
       case HiveParser.TOK_SORTBY:
-     // Get the sort by aliases - these are aliased to the entries in the
+        // Get the sort by aliases - these are aliased to the entries in the
         // select list
         queryProperties.setHasSortBy(true);
         qbp.setSortByExprForClause(ctx_1.dest, ast);
@@ -1645,17 +1648,17 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       List<String> targetColNames = new ArrayList<String>();
       for(Node col : tabColName.getChildren()) {
         assert ((ASTNode)col).getType() == HiveParser.Identifier :
-          "expected token " + HiveParser.Identifier + " found " + ((ASTNode)col).getType();
+            "expected token " + HiveParser.Identifier + " found " + ((ASTNode)col).getType();
         targetColNames.add(((ASTNode)col).getText().toLowerCase());
       }
       String fullTableName = getUnescapedName((ASTNode) ast.getChild(0).getChild(0),
-        SessionState.get().getCurrentDatabase());
+          SessionState.get().getCurrentDatabase());
       qbp.setDestSchemaForClause(ctx_1.dest, targetColNames);
       Set<String> targetColumns = new HashSet<String>();
       targetColumns.addAll(targetColNames);
       if(targetColNames.size() != targetColumns.size()) {
         throw new SemanticException(generateErrorMessage(tabColName,
-          "Duplicate column name detected in " + fullTableName + " table schema specification"));
+            "Duplicate column name detected in " + fullTableName + " table schema specification"));
       }
       Table targetTable = null;
       try {
@@ -1667,7 +1670,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
       if(targetTable == null) {
         throw new SemanticException(generateErrorMessage(ast,
-          "Unable to access metadata for table " + fullTableName));
+            "Unable to access metadata for table " + fullTableName));
       }
       for(FieldSchema f : targetTable.getCols()) {
         //parser only allows foo(a,b), not foo(foo.a, foo.b)
@@ -1701,7 +1704,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
               }
               if(tokPartVal != null && tokPartVal.getType() == HiveParser.TOK_PARTVAL && tokPartVal.getChildCount() == 1) {
                 assert tokPartVal.getChild(0).getType() == HiveParser.Identifier :
-                  "Expected column name; found tokType=" + tokPartVal.getType();
+                    "Expected column name; found tokType=" + tokPartVal.getType();
                 dynamicPartitionColumns.add(tokPartVal.getChild(0).getText());
               }
             }
@@ -1713,10 +1716,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         if(!targetColumns.isEmpty()) {
           //Found some columns in user specified schema which are neither regular not dynamic partition columns
           throw new SemanticException(generateErrorMessage(tabColName,
-            "'" + (targetColumns.size() == 1 ? targetColumns.iterator().next() : targetColumns) +
-              "' in insert schema specification " + (targetColumns.size() == 1 ? "is" : "are") +
-              " not found among regular columns of " +
-              fullTableName + " nor dynamic partition columns."));
+              "'" + (targetColumns.size() == 1 ? targetColumns.iterator().next() : targetColumns) +
+                  "' in insert schema specification " + (targetColumns.size() == 1 ? "is" : "are") +
+                  " not found among regular columns of " +
+                  fullTableName + " nor dynamic partition columns."));
         }
       }
     }
@@ -1761,8 +1764,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if (cte != null) {
         if (ctesExpanded.contains(cteName)) {
           throw new SemanticException("Recursive cte " + cteName +
-                  " detected (cycle: " + StringUtils.join(ctesExpanded, " -> ") +
-                  " -> " + cteName + ").");
+              " detected (cycle: " + StringUtils.join(ctesExpanded, " -> ") +
+              " -> " + cteName + ").");
         }
         cte.reference++;
         current.parents.add(cte);
@@ -1804,7 +1807,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private void getMetaData(QBExpr qbexpr, ReadEntity parentInput)
-          throws HiveException {
+      throws HiveException {
     if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) {
       getMetaData(qbexpr.getQB(), parentInput);
     } else {
@@ -1815,7 +1818,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("nls")
   private void getMetaData(QB qb, ReadEntity parentInput)
-          throws HiveException {
+      throws HiveException {
     LOG.info("Get metadata for source tables");
 
     // Go over the tables and populate the related structures.
@@ -1847,7 +1850,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         tab = new Table(tab.getTTable().deepCopy());
       }
       if (tab == null ||
-              tab.getDbName().equals(SessionState.get().getCurrentDatabase())) {
+          tab.getDbName().equals(SessionState.get().getCurrentDatabase())) {
         Table materializedTab = ctx.getMaterializedTable(cteName);
         if (materializedTab == null) {
           // we first look for this alias from CTE, and then from catalog.
@@ -1877,7 +1880,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         }
       }
 
-     if (tab.isView()) {
+      if (tab.isView()) {
         if (qb.getParseInfo().isAnalyzeCommand()) {
           throw new SemanticException(ErrorMsg.ANALYZE_VIEW.getMsg());
         }
@@ -1901,7 +1904,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         String aliasId = getAliasId(alias, qb);
         if (aliasId != null) {
           aliasId = aliasId.replace(SemanticAnalyzer.SUBQUERY_TAG_1, "")
-            .replace(SemanticAnalyzer.SUBQUERY_TAG_2, "");
+              .replace(SemanticAnalyzer.SUBQUERY_TAG_2, "");
         }
         viewAliasToInput.put(aliasId, viewInput);
         continue;
@@ -1909,8 +1912,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
       if (!InputFormat.class.isAssignableFrom(tab.getInputFormatClass())) {
         throw new SemanticException(generateErrorMessage(
-                qb.getParseInfo().getSrcForAlias(alias),
-                ErrorMsg.INVALID_INPUT_FORMAT_TYPE.getMsg()));
+            qb.getParseInfo().getSrcForAlias(alias),
+            ErrorMsg.INVALID_INPUT_FORMAT_TYPE.getMsg()));
       }
 
       qb.getMetaData().setSrcForAlias(alias, tab);
@@ -1923,8 +1926,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             ts.partitions = db.getPartitionsByNames(ts.tableHandle, ts.partSpec);
           } catch (HiveException e) {
             throw new SemanticException(generateErrorMessage(
-                    qb.getParseInfo().getSrcForAlias(alias),
-                    "Cannot get partitions for " + ts.partSpec), e);
+                qb.getParseInfo().getSrcForAlias(alias),
+                "Cannot get partitions for " + ts.partSpec), e);
           }
         }
 
@@ -1936,7 +1939,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // Temporary tables created during the execution are not the input sources
       if (!PlanUtils.isValuesTempTable(alias)) {
         PlanUtils.addInput(inputs,
-                new ReadEntity(tab, parentViewInfo, parentViewInfo == null),mergeIsDirect);
+            new ReadEntity(tab, parentViewInfo, parentViewInfo == null),mergeIsDirect);
       }
     }
 
@@ -1972,84 +1975,84 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     for (String name : qbp.getClauseNamesForDest()) {
       ASTNode ast = qbp.getDestForClause(name);
       switch (ast.getToken().getType()) {
-        case HiveParser.TOK_TAB: {
-          TableSpec ts = new TableSpec(db, conf, ast);
-          if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
-            throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
-          }
+      case HiveParser.TOK_TAB: {
+        TableSpec ts = new TableSpec(db, conf, ast);
+        if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
+          throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
+        }
 
-          Class<?> outputFormatClass = ts.tableHandle.getOutputFormatClass();
-          if (!ts.tableHandle.isNonNative() &&
-              !HiveOutputFormat.class.isAssignableFrom(outputFormatClass)) {
-            throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE
-                .getMsg(ast, "The class is " + outputFormatClass.toString()));
-          }
+        Class<?> outputFormatClass = ts.tableHandle.getOutputFormatClass();
+        if (!ts.tableHandle.isNonNative() &&
+            !HiveOutputFormat.class.isAssignableFrom(outputFormatClass)) {
+          throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE
+              .getMsg(ast, "The class is " + outputFormatClass.toString()));
+        }
 
-          boolean isTableWrittenTo = qb.getParseInfo().isInsertIntoTable(ts.tableHandle.getDbName(),
+        boolean isTableWrittenTo = qb.getParseInfo().isInsertIntoTable(ts.tableHandle.getDbName(),
             ts.tableHandle.getTableName());
-          isTableWrittenTo |= (qb.getParseInfo().getInsertOverwriteTables().
+        isTableWrittenTo |= (qb.getParseInfo().getInsertOverwriteTables().
             get(getUnescapedName((ASTNode) ast.getChild(0), ts.tableHandle.getDbName()).toLowerCase()) != null);
-          assert isTableWrittenTo :
+        assert isTableWrittenTo :
             "Inconsistent data structure detected: we are writing to " + ts.tableHandle  + " in " +
-              name + " but it's not in isInsertIntoTable() or getInsertOverwriteTables()";
-          // Disallow update and delete on non-acid tables
-          boolean isAcid = AcidUtils.isAcidTable(ts.tableHandle);
-          if ((updating(name) || deleting(name)) && !isAcid) {
-            // Whether we are using an acid compliant transaction manager has already been caught in
-            // UpdateDeleteSemanticAnalyzer, so if we are updating or deleting and getting nonAcid
-            // here, it means the table itself doesn't support it.
-            throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, ts.tableName);
+                name + " but it's not in isInsertIntoTable() or getInsertOverwriteTables()";
+        // Disallow update and delete on non-acid tables
+        boolean isAcid = AcidUtils.isAcidTable(ts.tableHandle);
+        if ((updating(name) || deleting(name)) && !isAcid) {
+          // Whether we are using an acid compliant transaction manager has already been caught in
+          // UpdateDeleteSemanticAnalyzer, so if we are updating or deleting and getting nonAcid
+          // here, it means the table itself doesn't support it.
+          throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, ts.tableName);
+        }
+        // TableSpec ts is got from the query (user specified),
+        // which means the user didn't specify partitions in their query,
+        // but whether the table itself is partitioned is not know.
+        if (ts.specType != SpecType.STATIC_PARTITION) {
+          // This is a table or dynamic partition
+          qb.getMetaData().setDestForAlias(name, ts.tableHandle);
+          // has dynamic as well as static partitions
+          if (ts.partSpec != null && ts.partSpec.size() > 0) {
+            qb.getMetaData().setPartSpecForAlias(name, ts.partSpec);
           }
-          // TableSpec ts is got from the query (user specified),
-          // which means the user didn't specify partitions in their query,
-          // but whether the table itself is partitioned is not know.
-          if (ts.specType != SpecType.STATIC_PARTITION) {
-            // This is a table or dynamic partition
-            qb.getMetaData().setDestForAlias(name, ts.tableHandle);
-            // has dynamic as well as static partitions
-            if (ts.partSpec != null && ts.partSpec.size() > 0) {
-              qb.getMetaData().setPartSpecForAlias(name, ts.partSpec);
-            }
-          } else {
-            // This is a partition
-            qb.getMetaData().setDestForAlias(name, ts.partHandle);
-          }
-          if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-            // Add the table spec for the destination table.
-            qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts);
-          }
-          break;
+        } else {
+          // This is a partition
+          qb.getMetaData().setDestForAlias(name, ts.partHandle);
+        }
+        if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+          // Add the table spec for the destination table.
+          qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts);
         }
+        break;
+      }
 
-        case HiveParser.TOK_DIR: {
-          // This is a dfs file
-          String fname = stripQuotes(ast.getChild(0).getText());
-          if ((!qb.getParseInfo().getIsSubQ())
-              && (((ASTNode) ast.getChild(0)).getToken().getType() == HiveParser.TOK_TMP_FILE)) {
+      case HiveParser.TOK_DIR: {
+        // This is a dfs file
+        String fname = stripQuotes(ast.getChild(0).getText());
+        if ((!qb.getParseInfo().getIsSubQ())
+            && (((ASTNode) ast.getChild(0)).getToken().getType() == HiveParser.TOK_TMP_FILE)) {
 
-            if (qb.isCTAS() || qb.isMaterializedView()) {
-              qb.setIsQuery(false);
-              ctx.setResDir(null);
-              ctx.setResFile(null);
+          if (qb.isCTAS() || qb.isMaterializedView()) {
+            qb.setIsQuery(false);
+            ctx.setResDir(null);
+            ctx.setResFile(null);
 
-              Path location;
-              // If the CTAS query does specify a location, use the table location, else use the db location
-              if (qb.getTableDesc() != null && qb.getTableDesc().getLocation() != null) {
-                location = new Path(qb.getTableDesc().getLocation());
-              } else {
-                // allocate a temporary output dir on the location of the table
-                String tableName = getUnescapedName((ASTNode) ast.getChild(0));
-                String[] names = Utilities.getDbTableName(tableName);
-                try {
-                  Warehouse wh = new Warehouse(conf);
-                  //Use destination table's db location.
-                  String destTableDb = qb.getTableDesc() != null ? qb.getTableDesc().getDatabaseName() : null;
-                  if (destTableDb == null) {
-                    destTableDb = names[0];
-                  }
-                  location = wh.getDatabasePath(db.getDatabase(destTableDb));
-                } catch (MetaException e) {
-                  throw new SemanticException(e);
+            Path location;
+            // If the CTAS query does specify a location, use the table location, else use the db location
+            if (qb.getTableDesc() != null && qb.getTableDesc().getLocation() != null) {
+              location = new Path(qb.getTableDesc().getLocation());
+            } else {
+              // allocate a temporary output dir on the location of the table
+              String tableName = getUnescapedName((ASTNode) ast.getChild(0));
+              String[] names = Utilities.getDbTableName(tableName);
+              try {
+                Warehouse wh = new Warehouse(conf);
+                //Use destination table's db location.
+                String destTableDb = qb.getTableDesc() != null ? qb.getTableDesc().getDatabaseName() : null;
+                if (destTableDb == null) {
+                  destTableDb = names[0];
+                }
+                location = wh.getDatabasePath(db.getDatabase(destTableDb));
+              } catch (MetaException e) {
+                throw new SemanticException(e);
                 }
               }
               try {
@@ -2071,66 +2074,66 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
                 // Add the table spec for the destination table.
                 qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts);
               }
-            } else {
-              // This is the only place where isQuery is set to true; it defaults to false.
-              qb.setIsQuery(true);
-              Path stagingPath = getStagingDirectoryPathname(qb);
-              fname = stagingPath.toString();
-              ctx.setResDir(stagingPath);
-            }
+          } else {
+            // This is the only place where isQuery is set to true; it defaults to false.
+            qb.setIsQuery(true);
+            Path stagingPath = getStagingDirectoryPathname(qb);
+            fname = stagingPath.toString();
+            ctx.setResDir(stagingPath);
           }
+        }
 
-          boolean isDfsFile = true;
-          if (ast.getChildCount() >= 2 && ast.getChild(1).getText().toLowerCase().equals("local")) {
-            isDfsFile = false;
-          }
-          // Set the destination for the SELECT query inside the CTAS
-          qb.getMetaData().setDestForAlias(name, fname, isDfsFile);
-
-          CreateTableDesc directoryDesc = new CreateTableDesc();
-          boolean directoryDescIsSet = false;
-          int numCh = ast.getChildCount();
-          for (int num = 1; num < numCh ; num++){
-            ASTNode child = (ASTNode) ast.getChild(num);
-            if (child != null) {
-              if (storageFormat.fillStorageFormat(child)) {
-                directoryDesc.setOutputFormat(storageFormat.getOutputFormat());
-                directoryDesc.setSerName(storageFormat.getSerde());
-                directoryDescIsSet = true;
-                continue;
-              }
-              switch (child.getToken().getType()) {
-                case HiveParser.TOK_TABLEROWFORMAT:
-                  rowFormatParams.analyzeRowFormat(child);
-                  directoryDesc.setFieldDelim(rowFormatParams.fieldDelim);
-                  directoryDesc.setLineDelim(rowFormatParams.lineDelim);
-                  directoryDesc.setCollItemDelim(rowFormatParams.collItemDelim);
-                  directoryDesc.setMapKeyDelim(rowFormatParams.mapKeyDelim);
-                  directoryDesc.setFieldEscape(rowFormatParams.fieldEscape);
-                  directoryDesc.setNullFormat(rowFormatParams.nullFormat);
-                  directoryDescIsSet=true;
-                  break;
-                case HiveParser.TOK_TABLESERIALIZER:
-                  ASTNode serdeChild = (ASTNode) child.getChild(0);
-                  storageFormat.setSerde(unescapeSQLString(serdeChild.getChild(0).getText()));
-                  directoryDesc.setSerName(storageFormat.getSerde());
-                  if (serdeChild.getChildCount() > 1) {
-                    directoryDesc.setSerdeProps(new HashMap<String, String>());
-                    readProps((ASTNode) serdeChild.getChild(1).getChild(0), directoryDesc.getSerdeProps());
-                  }
-                  directoryDescIsSet = true;
-                  break;
+        boolean isDfsFile = true;
+        if (ast.getChildCount() >= 2 && ast.getChild(1).getText().toLowerCase().equals("local")) {
+          isDfsFile = false;
+        }
+        // Set the destination for the SELECT query inside the CTAS
+        qb.getMetaData().setDestForAlias(name, fname, isDfsFile);
+
+        CreateTableDesc directoryDesc = new CreateTableDesc();
+        boolean directoryDescIsSet = false;
+        int numCh = ast.getChildCount();
+        for (int num = 1; num < numCh ; num++){
+          ASTNode child = (ASTNode) ast.getChild(num);
+          if (child != null) {
+            if (storageFormat.fillStorageFormat(child)) {
+              directoryDesc.setOutputFormat(storageFormat.getOutputFormat());
+              directoryDesc.setSerName(storageFormat.getSerde());
+              directoryDescIsSet = true;
+              continue;
+            }
+            switch (child.getToken().getType()) {
+            case HiveParser.TOK_TABLEROWFORMAT:
+              rowFormatParams.analyzeRowFormat(child);
+              directoryDesc.setFieldDelim(rowFormatParams.fieldDelim);
+              directoryDesc.setLineDelim(rowFormatParams.lineDelim);
+              directoryDesc.setCollItemDelim(rowFormatParams.collItemDelim);
+              directoryDesc.setMapKeyDelim(rowFormatParams.mapKeyDelim);
+              directoryDesc.setFieldEscape(rowFormatParams.fieldEscape);
+              directoryDesc.setNullFormat(rowFormatParams.nullFormat);
+              directoryDescIsSet=true;
+              break;
+            case HiveParser.TOK_TABLESERIALIZER:
+              ASTNode serdeChild = (ASTNode) child.getChild(0);
+              storageFormat.setSerde(unescapeSQLString(serdeChild.getChild(0).getText()));
+              directoryDesc.setSerName(storageFormat.getSerde());
+              if (serdeChild.getChildCount() > 1) {
+                directoryDesc.setSerdeProps(new HashMap<String, String>());
+                readProps((ASTNode) serdeChild.getChild(1).getChild(0), directoryDesc.getSerdeProps());
               }
+              directoryDescIsSet = true;
+              break;
             }
           }
-          if (directoryDescIsSet){
-            qb.setDirectoryDesc(directoryDesc);
-          }
-          break;
         }
-        default:
-          throw new SemanticException(generateErrorMessage(ast,
-              "Unknown Token Type " + ast.getToken().getType()));
+        if (directoryDescIsSet){
+          qb.setDirectoryDesc(directoryDesc);
+        }
+        break;
+      }
+      default:
+        throw new SemanticException(generateErrorMessage(ast,
+            "Unknown Token Type " + ast.getToken().getType()));
       }
     }
   }
@@ -2151,7 +2154,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         }
       }
     } catch (Exception e) {
-        throw new HiveException("Unable to determine if " + path + " is encrypted: " + e, e);
+      throw new HiveException("Unable to determine if " + path + " is encrypted: " + e, e);
     }
 
     return false;
@@ -2283,12 +2286,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private void replaceViewReferenceWithDefinition(QB qb, Table tab,
-      String tab_name, String alias) throws SemanticException {
+                                                  String tab_name, String alias) throws SemanticException {
 
     ASTNode viewTree;
     final ASTNodeOrigin viewOrigin = new ASTNodeOrigin("VIEW", tab.getTableName(),
         tab.getViewExpandedText(), alias, qb.getParseInfo().getSrcForAlias(
-            alias));
+        alias));
     try {
       // Reparse text, passing null for context to avoid clobbering
       // the top-level token stream.
@@ -2305,7 +2308,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       Dispatcher nodeOriginDispatcher = new Dispatcher() {
         @Override
         public Object dispatch(Node nd, java.util.Stack<Node> stack,
-            Object... nodeOutputs) {
+                               Object... nodeOutputs) {
           ((ASTNode) nd).setOrigin(viewOrigin);
           return null;
         }
@@ -2363,7 +2366,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("rawtypes")
   private String findAlias(ASTNode columnRef,
-      Map<String, Operator> aliasToOpInfo) throws SemanticException {
+                           Map<String, Operator> aliasToOpInfo) throws SemanticException {
     String colName = unescapeIdentifier(columnRef.getChild(0).getText()
         .toLowerCase());
     String tabAlias = null;
@@ -2391,9 +2394,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("nls")
   void parseJoinCondPopulateAlias(QBJoinTree joinTree, ASTNode condn,
-      ArrayList<String> leftAliases, ArrayList<String> rightAliases,
-      ArrayList<String> fields,
-      Map<String, Operator> aliasToOpInfo) throws SemanticException {
+                                  ArrayList<String> leftAliases, ArrayList<String> rightAliases,
+                                  ArrayList<String> fields,
+                                  Map<String, Operator> aliasToOpInfo) throws SemanticException {
     // String[] allAliases = joinTree.getAllAliases();
     switch (condn.getToken().getType()) {
     case HiveParser.TOK_TABLE_OR_COL:
@@ -2497,8 +2500,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private void populateAliases(List<String> leftAliases,
-      List<String> rightAliases, ASTNode condn, QBJoinTree joinTree,
-      List<String> leftSrc) throws SemanticException {
+                               List<String> rightAliases, ASTNode condn, QBJoinTree joinTree,
+                               List<String> leftSrc) throws SemanticException {
     if ((leftAliases.size() != 0) && (rightAliases.size() != 0)) {
       joinTree.addPostJoinFilter(condn);
       return;
@@ -2525,21 +2528,21 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * only left sources are referenced in a Predicate
    */
   void applyEqualityPredicateToQBJoinTree(QBJoinTree joinTree,
-      JoinType type,
-      List<String> leftSrc,
-      ASTNode joinCond,
-      ASTNode leftCondn,
-      ASTNode rightCondn,
-      List<String> leftCondAl1,
-      List<String> leftCondAl2,
-      List<String> rightCondAl1,
-      List<String> rightCondAl2) throws SemanticException {
+                                          JoinType type,
+                                          List<String> leftSrc,
+                                          ASTNode joinCond,
+                                          ASTNode leftCondn,
+                                          ASTNode rightCondn,
+                                          List<String> leftCondAl1,
+                                          List<String> leftCondAl2,
+                                          List<String> rightCondAl1,
+                                          List<String> rightCondAl2) throws SemanticException {
     if (leftCondAl1.size() != 0) {
       if ((rightCondAl1.size() != 0)
           || ((rightCondAl1.size() == 0) && (rightCondAl2.size() == 0))) {
         if (type.equals(JoinType.LEFTOUTER) ||
             type.equals(JoinType.FULLOUTER)) {
-            joinTree.getFilters().get(0).add(joinCond);
+          joinTree.getFilters().get(0).add(joinCond);
         } else {
           /*
            * If the rhs references table sources and this QBJoinTree has a leftTree;
@@ -2623,7 +2626,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           || ((rightCondAl1.size() == 0) && (rightCondAl2.size() == 0))) {
         if (type.equals(JoinType.RIGHTOUTER)
             || type.equals(JoinType.FULLOUTER)) {
-            joinTree.getFilters().get(1).add(joinCond);
+          joinTree.getFilters().get(1).add(joinCond);
         } else {
           joinTree.getFiltersForPushing().get(1).add(joinCond);
         }
@@ -2638,14 +2641,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     } else if (rightCondAl1.size() != 0) {
       if (type.equals(JoinType.LEFTOUTER)
           || type.equals(JoinType.FULLOUTER)) {
-          joinTree.getFilters().get(0).add(joinCond);
+        joinTree.getFilters().get(0).add(joinCond);
       } else {
         joinTree.getFiltersForPushing().get(0).add(joinCond);
       }
     } else {
       if (type.equals(JoinType.RIGHTOUTER)
           || type.equals(JoinType.FULLOUTER)) {
-          joinTree.getFilters().get(1).add(joinCond);
+        joinTree.getFilters().get(1).add(joinCond);
       } else if (type.equals(JoinType.LEFTSEMI)) {
         joinTree.getExpressions().get(0).add(leftCondn);
         joinTree.getExpressions().get(1).add(rightCondn);
@@ -2661,7 +2664,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("rawtypes")
   private void parseJoinCondition(QBJoinTree joinTree, ASTNode joinCond, List<String> leftSrc,
-      Map<String, Operator> aliasToOpInfo)
+                                  Map<String, Operator> aliasToOpInfo)
       throws SemanticException {
     if (joinCond == null) {
       return;
@@ -2700,8 +2703,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("rawtypes")
   private void parseJoinCondition(QBJoinTree joinTree, ASTNode joinCond,
-      List<String> leftSrc, JoinType type,
-      Map<String, Operator> aliasToOpInfo) throws SemanticException {
+                                  List<String> leftSrc, JoinType type,
+                                  Map<String, Operator> aliasToOpInfo) throws SemanticException {
     if (joinCond == null) {
       return;
     }
@@ -2797,14 +2800,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         if (!leftAliasNull) {
           if (type.equals(JoinType.LEFTOUTER)
               || type.equals(JoinType.FULLOUTER)) {
-              joinTree.getFilters().get(0).add(joinCond);
+            joinTree.getFilters().get(0).add(joinCond);
           } else {
             joinTree.getFiltersForPushing().get(0).add(joinCond);
           }
         } else {
           if (type.equals(JoinType.RIGHTOUTER)
               || type.equals(JoinType.FULLOUTER)) {
-              joinTree.getFilters().get(1).add(joinCond);
+            joinTree.getFilters().get(1).add(joinCond);
           } else {
             joinTree.getFiltersForPushing().get(1).add(joinCond);
           }
@@ -2817,7 +2820,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("rawtypes")
   private void extractJoinCondsFromWhereClause(QBJoinTree joinTree, QB qb, String dest, ASTNode predicate,
-      Map<String, Operator> aliasToOpInfo) throws SemanticException {
+                                               Map<String, Operator> aliasToOpInfo) throws SemanticException {
 
     switch (predicate.getType()) {
     case HiveParser.KW_AND:
@@ -2834,7 +2837,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       ArrayList<String> leftCondAl2 = new ArrayList<String>();
       try {
         parseJoinCondPopulateAlias(joinTree, leftCondn, leftCondAl1, leftCondAl2,
-          null, aliasToOpInfo);
+            null, aliasToOpInfo);
       } catch(SemanticException se) {
         // suppress here; if it is a real issue will get caught in where clause handling.
         return;
@@ -2884,7 +2887,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("nls")
   public <T extends OperatorDesc> Operator<T> putOpInsertMap(Operator<T> op,
-      RowResolver rr) {
+                                                             RowResolver rr) {
     OpParseContext ctx = new OpParseContext(rr);
     opParseCtx.put(op, ctx);
     op.augmentPlan();
@@ -2893,7 +2896,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("nls")
   private Operator genHavingPlan(String dest, QB qb, Operator input,
-      Map<String, Operator> aliasToOpInfo)
+                                 Map<String, Operator> aliasToOpInfo)
       throws SemanticException {
 
     ASTNode havingExpr = qb.getParseInfo().getHavingForClause(dest);
@@ -2912,8 +2915,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // If CBO did not optimize the query, we might need to replace grouping function
       final String destClauseName = qb.getParseInfo().getClauseNames().iterator().next();
       final boolean cubeRollupGrpSetPresent = (!qb.getParseInfo().getDestRollups().isEmpty()
-              || !qb.getParseInfo().getDestGroupingSets().isEmpty()
-              || !qb.getParseInfo().getDestCubes().isEmpty());
+          || !qb.getParseInfo().getDestGroupingSets().isEmpty()
+          || !qb.getParseInfo().getDestCubes().isEmpty());
       // Special handling of grouping function
       condn = rewriteGroupingFunctionAST(getGroupByForClause(qb.getParseInfo(), destClauseName), condn,
           !cubeRollupGrpSetPresent);
@@ -2930,7 +2933,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   protected static ASTNode rewriteGroupingFunctionAST(final List<ASTNode> grpByAstExprs, ASTNode targetNode,
-          final boolean noneSet) throws SemanticException {
+                                                      final boolean noneSet) throws SemanticException {
 
     TreeVisitorAction action = new TreeVisitorAction() {
 
@@ -2953,9 +2956,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             ASTNode newRoot = new ASTNode();
             // Rewritten grouping function
             ASTNode groupingFunc = (ASTNode) ParseDriver.adaptor.create(
-                    HiveParser.Identifier, "grouping");
+                HiveParser.Identifier, "grouping");
             ParseDriver.adaptor.addChild(groupingFunc, ParseDriver.adaptor.create(
-                    HiveParser.Identifier, "rewritten"));
+                HiveParser.Identifier, "rewritten"));
             newRoot.addChild(groupingFunc);
             // Grouping ID reference
             ASTNode childGroupingID;
@@ -2963,13 +2966,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
               // Query does not contain CUBE, ROLLUP, or GROUPING SETS, and thus,
               // grouping should return 0
               childGroupingID = (ASTNode) ParseDriver.adaptor.create(HiveParser.IntegralLiteral,
-                    String.valueOf(0));
+                  String.valueOf(0));
             } else {
               // We refer to grouping_id column
               childGroupingID = (ASTNode) ParseDriver.adaptor.create(
-                      HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL");
+                  HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL");
               ParseDriver.adaptor.addChild(childGroupingID, ParseDriver.adaptor.create(
-                      HiveParser.Identifier, VirtualColumn.GROUPINGID.getName()));
+                  HiveParser.Identifier, VirtualColumn.GROUPINGID.getName()));
             }
             newRoot.addChild(childGroupingID);
             // Indices
@@ -2981,7 +2984,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
                   // Create and add AST node with position of grouping function input
                   // in group by clause
                   ASTNode childN = (ASTNode) ParseDriver.adaptor.create(HiveParser.IntegralLiteral,
-                          String.valueOf(IntMath.mod(-j-1, grpByAstExprs.size())));
+                      String.valueOf(IntMath.mod(-j-1, grpByAstExprs.size())));
                   newRoot.addChild(childN);
                   break;
                 }
@@ -3013,8 +3016,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("nls")
   private Operator genFilterPlan(ASTNode searchCond, QB qb, Operator input,
-      Map<String, Operator> aliasToOpInfo,
-      boolean forHavingClause, boolean forGroupByClause)
+                                 Map<String, Operator> aliasToOpInfo,
+                                 boolean forHavingClause, boolean forGroupByClause)
       throws SemanticException {
 
     OpParseContext inputCtx = opParseCtx.get(input);
@@ -3114,7 +3117,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         if ( subQuery.getOperator().getType() != SubQueryType.EXISTS &&
             subQuery.getOperator().getType() != SubQueryType.NOT_EXISTS &&
             sqRR.getColumnInfos().size() -
-               subQuery.getNumOfCorrelationExprsAddedToSQSelect() > 1 ) {
+                subQuery.getNumOfCorrelationExprsAddedToSQSelect() > 1 ) {
           throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
               subQueryAST, "SubQuery can contain only 1 item in Select List."));
         }
@@ -3205,7 +3208,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * every non nullSafe predicate.
    */
   private Operator genNotNullFilterForJoinSourcePlan(QB qb, Operator input,
-      QBJoinTree joinTree, ExprNodeDesc[] joinKeys) throws SemanticException {
+                                                     QBJoinTree joinTree, ExprNodeDesc[] joinKeys) throws SemanticException {
 
     if (qb == null || joinTree == null) {
       return input;
@@ -3226,7 +3229,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     List<Boolean> nullSafes = joinTree.getNullSafes();
     for (int i = 0; i < joinKeys.length; i++) {
       if (nullSafes.get(i) || (joinKeys[i] instanceof ExprNodeColumnDesc &&
-         ((ExprNodeColumnDesc)joinKeys[i]).getIsPartitionColOrVirtualCol())) {
+          ((ExprNodeColumnDesc)joinKeys[i]).getIsPartitionColOrVirtualCol())) {
         // no need to generate is not null predicate for partitioning or
         // virtual column, since those columns can never be null.
         continue;
@@ -3273,11 +3276,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   @SuppressWarnings("nls")
-  // TODO: make aliases unique, otherwise needless rewriting takes place
+    // TODO: make aliases unique, otherwise needless rewriting takes place
   Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel,
-    ArrayList<ExprNodeDesc> col_list, HashSet<ColumnInfo> excludeCols, RowResolver input,
-    RowResolver colSrcRR, Integer pos, RowResolver output, List<String> aliases,
-    boolean ensureUniqueCols) throws SemanticException {
+                          ArrayList<ExprNodeDesc> col_list, HashSet<ColumnInfo> excludeCols, RowResolver input,
+                          RowResolver colSrcRR, Integer pos, RowResolver output, List<String> aliases,
+                          boolean ensureUniqueCols) throws SemanticException {
 
     if (colSrcRR == null) {
       colSrcRR = input;
@@ -3514,7 +3517,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private TableDesc getTableDescFromSerDe(ASTNode child, String cols,
-      String colTypes, boolean defaultCols) throws SemanticException {
+                                          String colTypes, boolean defaultCols) throws SemanticException {
     if (child.getType() == HiveParser.TOK_SERDENAME) {
       String serdeName = unescapeSQLString(child.getChild(0).getText());
       Class<? extends Deserializer> serdeClass = null;
@@ -3723,7 +3726,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // Input and Output Serdes
     if (trfm.getChild(inputSerDeNum).getChildCount() > 0) {
       inInfo = getTableDescFromSerDe((ASTNode) (((ASTNode) trfm
-          .getChild(inputSerDeNum))).getChild(0), inpColumns.toString(),
+              .getChild(inputSerDeNum))).getChild(0), inpColumns.toString(),
           inpColumnTypes.toString(), false);
     } else {
       inInfo = PlanUtils.getTableDesc(serde, Integer
@@ -3733,7 +3736,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
     if (trfm.getChild(outputSerDeNum).getChildCount() > 0) {
       outInfo = getTableDescFromSerDe((ASTNode) (((ASTNode) trfm
-          .getChild(outputSerDeNum))).getChild(0), columns.toString(),
+              .getChild(outputSerDeNum))).getChild(0), columns.toString(),
           columnTypes.toString(), false);
       // This is for backward compatibility. If the user did not specify the
       // output column list, we assume that there are 2 columns: key and value.
@@ -3857,7 +3860,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   protected List<Integer> getGroupingSets(List<ASTNode> groupByExpr, QBParseInfo parseInfo,
-      String dest) throws SemanticException {
+                                          String dest) throws SemanticException {
     Map<String, Integer> exprPos = new HashMap<String, Integer>();
     for (int i = 0; i < groupByExpr.size(); ++i) {
       ASTNode node = groupByExpr.get(i);
@@ -3947,7 +3950,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   static String[] getColAlias(ASTNode selExpr, String defaultName,
-      RowResolver inputRR, boolean includeFuncName, int colNum) {
+                              RowResolver inputRR, boolean includeFuncName, int colNum) {
     String colAlias = null;
     String tabAlias = null;
     String[] colRef = new String[2];
@@ -3955,7 +3958,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     //for queries with a windowing expressions, the selexpr may have a third child
     if (selExpr.getChildCount() == 2 ||
         (selExpr.getChildCount() == 3 &&
-        selExpr.getChild(2).getType() == HiveParser.TOK_WINDOWSPEC)) {
+            selExpr.getChild(2).getType() == HiveParser.TOK_WINDOWSPEC)) {
       // return zz for "xx + yy AS zz"
       colAlias = unescapeIdentifier(selExpr.getChild(1).getText().toLowerCase());
       colRef[0] = tabAlias;
@@ -4038,7 +4041,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
 
   private Operator<?> genSelectPlan(String dest, QB qb, Operator<?> input,
-      Operator<?> inputForSelectStar) throws SemanticException {
+                                    Operator<?> inputForSelectStar) throws SemanticException {
     ASTNode selExprList = qb.getParseInfo().getSelForClause(dest);
     Operator<?> op = genSelectPlan(dest, selExprList, qb, input, inputForSelectStar, false);
 
@@ -4051,7 +4054,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("nls")
   private Operator<?> genSelectPlan(String dest, ASTNode selExprList, QB qb, Operator<?> input,
-      Operator<?> inputForSelectStar, boolean outerLV) throws SemanticException {
+                                    Operator<?> inputForSelectStar, boolean outerLV) throws SemanticException {
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("tree: " + selExprList.toStringTree());
@@ -4174,8 +4177,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     final boolean cubeRollupGrpSetPresent = (!qb.getParseInfo().getDestRollups().isEmpty()
-            || !qb.getParseInfo().getDestGroupingSets().isEmpty()
-            || !qb.getParseInfo().getDestCubes().isEmpty());
+        || !qb.getParseInfo().getDestGroupingSets().isEmpty()
+        || !qb.getParseInfo().getDestCubes().isEmpty());
     Set<String> colAliases = new HashSet<String>();
     ASTNode[] exprs = new ASTNode[exprList.getChildCount()];
     String[][] aliases = new String[exprList.getChildCount()][];
@@ -4229,7 +4232,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if (expr.getType() == HiveParser.TOK_ALLCOLREF) {
         int initPos = pos;
         pos = genColListRegex(".*", expr.getChildCount() == 0 ? null
-            : getUnescapedName((ASTNode) expr.getChild(0)).toLowerCase(),
+                : getUnescapedName((ASTNode) expr.getChild(0)).toLowerCase(),
             expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
         if (unparseTranslator.isEnabled()) {
           offset += pos - initPos - 1;
@@ -4246,7 +4249,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       } else if (expr.getType() == HiveParser.DOT
           && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL
           && inputRR.hasTableAlias(unescapeIdentifier(expr.getChild(0)
-              .getChild(0).getText().toLowerCase())) && !hasAsClause
+          .getChild(0).getText().toLowerCase())) && !hasAsClause
           && !inputRR.getIsExprResolver()
           && isRegex(unescapeIdentifier(expr.getChild(1).getText()), conf)) {
         // In case the expression is TABLE.COL (col can be regex).
@@ -4254,7 +4257,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         // We don't allow this for ExprResolver - the Group By case
         pos = genColListRegex(unescapeIdentifier(expr.getChild(1).getText()),
             unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()),
-             expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
+            expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
       } else {
         // Case when this is an expression
         TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR, true, isCBOExecuted());
@@ -4341,8 +4344,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * @throws SemanticException
    */
   public RowResolver handleInsertStatementSpec(List<ExprNodeDesc> col_list, String dest,
-                                         RowResolver outputRR, RowResolver inputRR, QB qb,
-                                         ASTNode selExprList) throws SemanticException {
+                                               RowResolver outputRR, RowResolver inputRR, QB qb,
+                                               ASTNode selExprList) throws SemanticException {
     //(z,x)
     List<String> targetTableSchema = qb.getParseInfo().getDestSchemaForClause(dest);//specified in the query
     if(targetTableSchema == null) {
@@ -4353,9 +4356,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       Table target = qb.getMetaData().getDestTableForAlias(dest);
       Partition partition = target == null ? qb.getMetaData().getDestPartitionForAlias(dest) : null;
       throw new SemanticException(generateErrorMessage(selExprList,
-        "Expected " + targetTableSchema.size() + " columns for " + dest +
-          (target != null ? "/" + target.getCompleteName() : (partition != null ? "/" + partition.getCompleteName() : "")) +
-          "; select produces " + col_list.size() + " columns"));
+          "Expected " + targetTableSchema.size() + " columns for " + dest +
+              (target != null ? "/" + target.getCompleteName() : (partition != null ? "/" + partition.getCompleteName() : "")) +
+              "; select produces " + col_list.size() + " columns"));
     }
     //e.g. map z->expr for a
     Map<String, ExprNodeDesc> targetCol2Projection = new HashMap<String, ExprNodeDesc>();
@@ -4370,7 +4373,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     Partition partition = target == null ? qb.getMetaData().getDestPartitionForAlias(dest) : null;
     if(target == null && partition == null) {
       throw new SemanticException(generateErrorMessage(selExprList,
-        "No table/partition found in QB metadata for dest='" + dest + "'"));
+          "No table/partition found in QB metadata for dest='" + dest + "'"));
     }
     ArrayList<ExprNodeDesc> new_col_list = new ArrayList<ExprNodeDesc>();
     colListPos = 0;
@@ -4410,7 +4413,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         new_col_list.add(exp);
         final String tableAlias = null;//this column doesn't come from any table
         ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(colListPos),
-          exp.getWritableObjectInspector(), tableAlias, false);
+            exp.getWritableObjectInspector(), tableAlias, false);
         newOutputRR.put(colInfo.getTabAlias(), colInfo.getInternalName(), colInfo);
       }
       colListPos++;
@@ -4486,8 +4489,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * for each GroupBy aggregation.
    */
   public static GenericUDAFEvaluator getGenericUDAFEvaluator(String aggName,
-      ArrayList<ExprNodeDesc> aggParameters, ASTNode aggTree,
-      boolean isDistinct, boolean isAllColumns)
+                                                             ArrayList<ExprNodeDesc> aggParameters, ASTNode aggTree,
+                                                             boolean isDistinct, boolean isAllColumns)
       throws SemanticException {
     ArrayList<ObjectInspector> originalParameterTypeInfos =
         getWritableObjectInspector(aggParameters);
@@ -4514,7 +4517,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    *           when the UDAF is not found or has problems.
    */
   public static GenericUDAFInfo getGenericUDAFInfo(GenericUDAFEvaluator evaluator,
-      GenericUDAFEvaluator.Mode emode, ArrayList<ExprNodeDesc> aggParameters)
+                                                   GenericUDAFEvaluator.Mode emode, ArrayList<ExprNodeDesc> aggParameters)
       throws SemanticException {
 
     GenericUDAFInfo r = new GenericUDAFInfo();
@@ -4586,7 +4589,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    *         a constant parameter; otherwise, return null
    */
   public static ExprNodeDesc isConstantParameterInAggregationParameters(String internalName,
-      List<ExprNodeDesc> reduceValues) {
+                                                                        List<ExprNodeDesc> reduceValues) {
     // only the pattern of "VALUE._col([0-9]+)" should be handled.
 
     String[] terms = internalName.split("\\.");
@@ -4624,8 +4627,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("nls")
   private Operator genGroupByPlanGroupByOperator(QBParseInfo parseInfo,
-      String dest, Operator input, ReduceSinkOperator rs, GroupByDesc.Mode mode,
-      Map<String, GenericUDAFEvaluator> genericUDAFEvaluators)
+                                                 String dest, Operator input, ReduceSinkOperator rs, GroupByDesc.Mode mode,
+                                                 Map<String, GenericUDAFEvaluator> genericUDAFEvaluators)
       throws SemanticException {
     RowResolver groupByInputRowResolver = opParseCtx
         .get(input).getRowResolver();
@@ -4758,10 +4761,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   // This function is called for GroupBy2 to pass the additional grouping keys introduced by
   // GroupBy1 for the grouping set (corresponding to the rollup).
   private void addGroupingSetKey(List<ExprNodeDesc> groupByKeys,
-      RowResolver groupByInputRowResolver,
-      RowResolver groupByOutputRowResolver,
-      List<String> outputColumnNames,
-      Map<String, ExprNodeDesc> colExprMap) throws SemanticException {
+                                 RowResolver groupByInputRowResolver,
+                                 RowResolver groupByOutputRowResolver,
+                                 List<String> outputColumnNames,
+                                 Map<String, ExprNodeDesc> colExprMap) throws SemanticException {
     // For grouping sets, add a dummy grouping key
     String groupingSetColumnName =
         groupByInputRowResolver.get(null, VirtualColumn.GROUPINGID.getName()).getInternalName();
@@ -4789,10 +4792,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   // This function is called for ReduceSink to add the additional grouping keys introduced by
   // GroupBy1 into the reduce keys.
   private void processGroupingSetReduceSinkOperator(RowResolver reduceSinkInputRowResolver,
-      RowResolver reduceSinkOutputRowResolver,
-      List<ExprNodeDesc> reduceKeys,
-      List<String> outputKeyColumnNames,
-      Map<String, ExprNodeDesc> colExprMap) throws SemanticException {
+                                                    RowResolver reduceSinkOutputRowResolver,
+                                                    List<ExprNodeDesc> reduceKeys,
+                                                    List<String> outputKeyColumnNames,
+                                                    Map<String, ExprNodeDesc> colExprMap) throws SemanticException {
     // add a key for reduce sink
     String groupingSetColumnName =
         reduceSinkInputRowResolver.get(null, VirtualColumn.GROUPINGID.getName()).getInternalName();
@@ -4832,11 +4835,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("nls")
   private Operator genGroupByPlanGroupByOperator1(QBParseInfo parseInfo,
-      String dest, Operator reduceSinkOperatorInfo, GroupByDesc.Mode mode,
-      Map<String, GenericUDAFEvaluator> genericUDAFEvaluators,
-      List<Integer> groupingSets,
-      boolean groupingSetsPresent,
-      boolean groupingSetsNeedAdditionalMRJob) throws SemanticException {
+                                                  String dest, Operator reduceSinkOperatorInfo, GroupByDesc.Mode mode,
+                                                  Map<String, GenericUDAFEvaluator> genericUDAFEvaluators,
+                                                  List<Integer> groupingSets,
+                                                  boolean groupingSetsPresent,
+                                                  boolean groupingSetsNeedAdditionalMRJob) throws SemanticException {
     ArrayList<String> outputColumnNames = new ArrayList<String>();
     RowResolver groupByInputRowResolver = opParseCtx
         .get(reduceSinkOperatorInfo).getRowResolver();
@@ -4974,7 +4977,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         assert (paraExpression != null);
         aggParameters.add(new ExprNodeColumnDesc(paraExprInfo.getType(),
             paraExpression, paraExprInfo.getTabAlias(), paraExprInfo
-                .getIsVirtualCol()));
+            .getIsVirtualCol()));
       }
       if (isDistinct) {
         numDistinctUDFs++;
@@ -5023,9 +5026,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * creates 'n' rows per input row, where 'n' is the number of grouping sets.
    */
   private void createNewGroupingKey(List<ExprNodeDesc> groupByKeys,
-      List<String> outputColumnNames,
-      RowResolver groupByOutputRowResolver,
-      Map<String, ExprNodeDesc> colExprMap) {
+                                    List<String> outputColumnNames,
+                                    RowResolver groupByOutputRowResolver,
+                                    Map<String, ExprNodeDesc> colExprMap) {
     // The value for the constant does not matter. It is replaced by the grouping set
     // value for the actual implementation
     ExprNodeConstantDesc constant = new ExprNodeConstantDesc(0);
@@ -5056,13 +5059,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("nls")
   private Operator genGroupByPlanMapGroupByOperator(QB qb,
-      String dest,
-      List<ASTNode> grpByExprs,
-      Operator inputOperatorInfo,
-      GroupByDesc.Mode mode,
-      Map<String, GenericUDAFEvaluator> genericUDAFEvaluators,
-      List<Integer> groupingSetKeys,
-      boolean groupingSetsPresent) throws SemanticException {
+                                                    String dest,
+                                                    List<ASTNode> grpByExprs,
+                                                    Operator inputOperatorInfo,
+                                                    GroupByDesc.Mode mode,
+                                                    Map<String, GenericUDAFEvaluator> genericUDAFEvaluators,
+                                                    List<Integer> groupingSetKeys,
+                                                    boolean groupingSetsPresent) throws SemanticException {
 
     RowResolver groupByInputRowResolver = opParseCtx.get(inputOperatorInfo)
         .getRowResolver();
@@ -5206,14 +5209,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("nls")
   private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb,
-      String dest,
-      Operator inputOperatorInfo,
-      List<ASTNode> grpByExprs,
-      int numPartitionFields,
-      boolean changeNumPartitionFields,
-      int numReducers,
-      boolean mapAggrDone,
-      boolean groupingSetsPresent) throws SemanticException {
+                                                              String dest,
+                                                              Operator inputOperatorInfo,
+                                                              List<ASTNode> grpByExprs,
+                                                              int numPartitionFields,
+                                                              boolean changeNumPartitionFields,
+                                                              int numReducers,
+                                                              boolean mapAggrDone,
+                                                              boolean groupingSetsPresent) throws SemanticException {
 
     RowResolver reduceSinkInputRowResolver = opParseCtx.get(inputOperatorInfo)
         .getRowResolver();
@@ -5295,8 +5298,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private ArrayList<ExprNodeDesc> getReduceKeysForReduceSink(List<ASTNode> grpByExprs, String dest,
-      RowResolver reduceSinkInputRowResolver, RowResolver reduceSinkOutputRowResolver,
-      List<String> outputKeyColumnNames, Map<String, ExprNodeDesc> colExprMap)
+                                                             RowResolver reduceSinkInputRowResolver, RowResolver reduceSinkOutputRowResolver,
+                                                             List<String> outputKeyColumnNames, Map<String, ExprNodeDesc> colExprMap)
       throws SemanticException {
 
     ArrayList<ExprNodeDesc> reduceKeys = new ArrayList<ExprNodeDesc>();
@@ -5332,10 +5335,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private List<List<Integer>> getDistinctColIndicesForReduceSink(QBParseInfo parseInfo,
-      String dest,
-      List<ExprNodeDesc> reduceKeys, RowResolver reduceSinkInputRowResolver,
-      RowResolver reduceSinkOutputRowResolver, List<String> outputKeyColumnNames,
-      Map<String, ExprNodeDesc> colExprMap)
+                                                                 String dest,
+                                                                 List<ExprNodeDesc> reduceKeys, RowResolver reduceSinkInputRowResolver,
+                                                                 RowResolver reduceSinkOutputRowResolver, List<String> outputKeyColumnNames,
+                                                                 Map<String, ExprNodeDesc> colExprMap)
       throws SemanticException {
 
     List<List<Integer>> distinctColIndices = new ArrayList<List<Integer>>();
@@ -5384,9 +5387,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private void getReduceValuesForReduceSinkNoMapAgg(QBParseInfo parseInfo, String dest,
-      RowResolver reduceSinkInputRowResolver, RowResolver reduceSinkOutputRowResolver,
-      List<String> outputValueColumnNames, ArrayList<ExprNodeDesc> reduceValues,
-      Map<String, ExprNodeDesc> colExprMap) throws SemanticException {
+                                                    RowResolver reduceSinkInputRowResolver, RowResolver reduceSinkOutputRowResolver,
+                                                    List<String> outputValueColumnNames, ArrayList<ExprNodeDesc> reduceValues,
+                                                    Map<String, ExprNodeDesc> colExprMap) throws SemanticException {
     HashMap<String, ASTNode> aggregationTrees = parseInfo
         .getAggregationExprsForClause(dest);
 
@@ -5414,7 +5417,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("nls")
   private ReduceSinkOperator genCommonGroupByPlanReduceSinkOperator(QB qb, List<String> dests,
-      Operator inputOperatorInfo) throws SemanticException {
+                                                                    Operator inputOperatorInfo) throws SemanticException {
 
     RowResolver reduceSinkInputRowResolver = opParseCtx.get(inputOperatorInfo)
         .getRowResolver();
@@ -5503,7 +5506,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   // from mapping if it's already on RS keys.
   // Remaining column expressions would be a candidate for an RS value
   private void removeMappingForKeys(ASTNode predicate, Map<ASTNode, ExprNodeDesc> mapping,
-      List<ExprNodeDesc> keys) {
+                                    List<ExprNodeDesc> keys) {
     ExprNodeDesc expr = mapping.get(predicate);
     if (expr != null && ExprNodeDescUtils.indexOf(expr, keys) >= 0) {
       removeRecursively(predicate, mapping);
@@ -5540,11 +5543,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("nls")
   private Operator genGroupByPlanReduceSinkOperator2MR(QBParseInfo parseInfo,
-      String dest,
-      Operator groupByOperatorInfo,
-      int numPartitionFields,
-      int numReducers,
-      boolean groupingSetsPresent) throws SemanticException {
+                                                       String dest,
+                                                       Operator groupByOperatorInfo,
+                                                       int numPartitionFields,
+                                                       int numReducers,
+                                                       boolean groupingSetsPresent) throws SemanticException {
     RowResolver reduceSinkInputRowResolver2 = opParseCtx.get(
         groupByOperatorInfo).getRowResolver();
     RowResolver reduceSinkOutputRowResolver2 = new RowResolver();
@@ -5628,11 +5631,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("nls")
   private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo,
-      String dest,
-      Operator reduceSinkOperatorInfo2,
-      GroupByDesc.Mode mode,
-      Map<String, GenericUDAFEvaluator> genericUDAFEvaluators,
-      boolean groupingSetsPresent) throws SemanticException {
+                                                    String dest,
+                                                    Operator reduceSinkOperatorInfo2,
+                                                    GroupByDesc.Mode mode,
+                                                    Map<String, GenericUDAFEvaluator> genericUDAFEvaluators,
+                                                    boolean groupingSetsPresent) throws SemanticException {
 
     RowResolver groupByInputRowResolver2 = opParseCtx.get(
         reduceSinkOperatorInfo2).getRowResolver();
@@ -5688,7 +5691,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       assert (paraExpression != null);
       aggParameters.add(new ExprNodeColumnDesc(paraExprInfo.getType(),
           paraExpression, paraExprInfo.getTabAlias(), paraExprInfo
-              .getIsVirtualCol()));
+          .getIsVirtualCol()));
 
       String aggName = unescapeIdentifier(value.getChild(0).getText());
 
@@ -5707,7 +5710,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
               udaf.genericUDAFEvaluator,
               udaf.convertedParameters,
               (mode != GroupByDesc.Mode.FINAL && value.getToken().getType() ==
-              HiveParser.TOK_FUNCTIONDI),
+                  HiveParser.TOK_FUNCTIONDI),
               amode));
       String field = getColumnInternalName(groupByKeys.size()
           + aggregations.size() - 1);
@@ -5797,7 +5800,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings({"nls"})
   private Operator genGroupByPlan1ReduceMultiGBY(List<String> dests, QB qb, Operator input,
-      Map<String, Operator> aliasToOpInfo)
+                                                 Map<String, Operator> aliasToOpInfo)
       throws SemanticException {
 
     QBParseInfo parseInfo = qb.getParseInfo();
@@ -5861,7 +5864,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       orFilterDesc.setGenerated(true);
 
       selectInput = putOpInsertMap(OperatorFactory.getAndMakeChild(orFilterDesc, new RowSchema(
-              inputRR.getColumnInfos()), input), inputRR);
+          inputRR.getColumnInfos()), input), inputRR);
     }
 
     // insert a select operator here used by the ColumnPruner to reduce
@@ -6025,7 +6028,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   static private void extractColumns(Set<String> colNamesExprs,
-      ExprNodeDesc exprNode) throws SemanticException {
+                                     ExprNodeDesc exprNode) throws SemanticException {
     if (exprNode instanceof ExprNodeColumnDesc) {
       colNamesExprs.add(((ExprNodeColumnDesc) exprNode).getColumn());
       return;
@@ -6050,9 +6053,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   void checkExpressionsForGroupingSet(List<ASTNode> grpByExprs,
-      List<ASTNode> distinctGrpByExprs,
-      Map<String, ASTNode> aggregationTrees,
-      RowResolver inputRowResolver) throws SemanticException {
+                                      List<ASTNode> distinctGrpByExprs,
+                                      Map<String, ASTNode> aggregationTrees,
+                                      RowResolver inputRowResolver) throws SemanticException {
 
     Set<String> colNamesGroupByExprs = new HashSet<String>();
     Set<String> colNamesGroupByDistinctExprs = new HashSet<String>();
@@ -6149,7 +6152,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("nls")
   private Operator genGroupByPlanMapAggrNoSkew(String dest, QB qb,
-      Operator inputOperatorInfo) throws SemanticException {
+                                               Operator inputOperatorInfo) throws SemanticException {
 
     QBParseInfo parseInfo = qb.getParseInfo();
     ObjectPair<List<ASTNode>, List<Integer>> grpByExprsGroupingSets =
@@ -6317,7 +6320,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    */
   @SuppressWarnings("nls")
   private Operator genGroupByPlanMapAggr2MR(String dest, QB qb,
-      Operator inputOperatorInfo) throws SemanticException {
+                                            Operator inputOperatorInfo) throws SemanticException {
 
     QBParseInfo parseInfo = qb.getParseInfo();
 
@@ -6501,7 +6504,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   @SuppressWarnings("nls")
   private Operator genBucketingSortingDest(String dest, Operator input, QB qb,
-      TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
+                                           TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
 
     // If the table is bucketed, and bucketing is enforced, do the following:
     // If the number of buckets is smaller than the number of maximum reducers,
@@ -6571,7 +6574,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         nullOrder.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z');
       }
       input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(),
-              maxReducers, (AcidUtils.isAcidTable(dest_tab) ?
+          maxReducers, (AcidUtils.isAcidTable(dest_tab) ?
               getAcidType(table_desc.getOutputFileFormatClass(), dest) : AcidUtils.Operation.NOT_ACID));
       reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0));
       ctx.setMultiFileSpray(multiFileSpray);
@@ -6582,7 +6585,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private void genPartnCols(String dest, Operator input, QB qb,
-      TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
+                            TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
     boolean enforceBucketing = false;
     ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
 
@@ -6627,6 +6630,128 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
   }
 
+  private ImmutableBitSet getEnabledNotNullConstraints(Table tbl) throws HiveException{
+    List<Boolean> nullConstraints = new ArrayList<>();
+    final NotNullConstraint nnc = Hive.get().getEnabledNotNullConstraints(
+        tbl.getDbName(), tbl.getTableName());
+    ImmutableBitSet bitSet = null;
+    if(nnc == null || nnc.getNotNullConstraints().isEmpty()) {
+      return bitSet;
+    }
+    // Build the bitset with not null columns
+    ImmutableBitSet.Builder builder = ImmutableBitSet.builder();
+    for (String nnCol : nnc.getNotNullConstraints().values()) {
+      int nnPos = -1;
+      for (int i = 0; i < tbl.getCols().size(); i++) {
+        if (tbl.getCols().get(i).getName().equals(nnCol)) {
+          nnPos = i;
+          builder.set(nnPos);
+          break;
+        }
+      }
+    }
+    bitSet = builder.build();
+    return bitSet;
+  }
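
For intuition, the method above only records the ordinal positions of the columns that carry an enabled NOT NULL constraint. A minimal, self-contained sketch of that bookkeeping, using java.util.BitSet and hypothetical column names in place of Hive's ImmutableBitSet and the metastore lookup, could look like this:

    import java.util.Arrays;
    import java.util.BitSet;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class NotNullPositionsSketch {
      public static void main(String[] args) {
        // Hypothetical table schema and its enabled NOT NULL columns.
        List<String> cols = Arrays.asList("id", "name", "ssn");
        Set<String> notNullCols = new HashSet<>(Arrays.asList("id", "ssn"));

        // Mark the position of every column that has an enabled NOT NULL constraint.
        BitSet notNullPositions = new BitSet(cols.size());
        for (int i = 0; i < cols.size(); i++) {
          if (notNullCols.contains(cols.get(i))) {
            notNullPositions.set(i);
          }
        }
        System.out.println(notNullPositions); // {0, 2}
      }
    }
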
+
+  private boolean mergeCardinalityViolationBranch(final Operator input) {
+    if(input instanceof SelectOperator) {
+      SelectOperator selectOp = (SelectOperator)input;
+      if(selectOp.getConf().getColList().size() == 1) {
+        ExprNodeDesc colExpr = selectOp.getConf().getColList().get(0);
+        if(colExpr instanceof ExprNodeGenericFuncDesc) {
+          ExprNodeGenericFuncDesc func = (ExprNodeGenericFuncDesc)colExpr ;
+          if(func.getGenericUDF() instanceof GenericUDFCardinalityViolation){
+            return true;
+          }
+        }
+      }
+    }
+    return false;
+  }
+
+  private Operator
+  genIsNotNullConstraint(String dest, QB qb, Operator input)
+      throws SemanticException {
+
+    boolean forceNotNullConstraint = conf.getBoolVar(ConfVars.HIVE_ENFORCE_NOT_NULL_CONSTRAINT);
+    if(!forceNotNullConstraint) {
+      return input;
+    }
+
+    if(deleting(dest)) {
+      // for DELETE statements the NOT NULL constraint need not be checked
+      return input;
+    }
+
+    // MERGE statements could have inserted a cardinality-violation branch; we need to skip the check in that case
+    if(mergeCardinalityViolationBranch(input)){
+      return input;
+    }
+
+    // if this is an INSERT INTO statement we might need to add a constraint check
+    Table targetTable = null;
+    Integer dest_type = qb.getMetaData().getDestTypeForAlias(dest);
+    if(dest_type == QBMetaData.DEST_TABLE) {
+      targetTable= qb.getMetaData().getDestTableForAlias(dest);
+
+    }
+    else if(dest_type == QBMetaData.DEST_PARTITION){
+      Partition dest_part = qb.getMetaData().getDestPartitionForAlias(dest);
+      targetTable = dest_part.getTable();
+
+    }
+    else {
+      throw new SemanticException("Generating NOT NULL constraint check: Invalid target type: " + dest);
+    }
+    ImmutableBitSet nullConstraintBitSet = null;
+    try {
+      nullConstraintBitSet = getEnabledNotNullConstraints(targetTable);
+    } catch (Exception e) {
+      if (e instanceof SemanticException) {
+        throw (SemanticException) e;
+      } else {
+        throw (new RuntimeException(e));
+      }
+    }
+    if(nullConstraintBitSet == null) {
+      return input;
+    }
+    List<ColumnInfo> colInfos = input.getSchema().getSignature();
+
+    ExprNodeDesc currUDF = null;
+    int constraintIdx = 0;
+    for(int colExprIdx=0; colExprIdx < colInfos.size(); colExprIdx++) {
+      if(updating(dest) && colExprIdx == 0) {
+        // for UPDATE statements the first column is _rowid
+        continue;
+      }
+      if (nullConstraintBitSet.indexOf(constraintIdx) != -1) {
+        ExprNodeDesc currExpr = TypeCheckProcFactory.toExprNodeDesc(colInfos.get(colExprIdx));
+        ExprNodeDesc isNotNullUDF = TypeCheckProcFactory.DefaultExprProcessor.
+            getFuncExprNodeDesc("isnotnull", currExpr);
+        ExprNodeDesc constraintUDF = TypeCheckProcFactory.DefaultExprProcessor.
+            getFuncExprNodeDesc("enforce_constraint", isNotNullUDF);
+        if (currUDF != null) {
+          currUDF = TypeCheckProcFactory.DefaultExprProcessor.
+              getFuncExprNodeDesc("and", currUDF, constraintUDF);
+        } else {
+          currUDF = constraintUDF;
+        }
+      }
+      constraintIdx++;
+    }
+    if (currUDF != null) {
+      assert (input.getParentOperators().size() == 1);
+      RowResolver inputRR = opParseCtx.get(input).getRowResolver();
+      Operator newConstraintFilter = putOpInsertMap(OperatorFactory.getAndMakeChild(
+          new FilterDesc(currUDF, false), new RowSchema(
+              inputRR.getColumnInfos()), input), inputRR);
+
+      return newConstraintFilter;
+    }
+    return input;
+  }
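
Taken together, the methods above gate the check on HIVE_ENFORCE_NOT_NULL_CONSTRAINT, skip it for DELETE statements and for the MERGE cardinality-violation branch, and then fold one enforce_constraint(isnotnull(col)) predicate per constrained column into a single AND-ed expression that becomes a FilterDesc in front of the sink. A rough, standalone sketch of just that folding step, with plain strings standing in for ExprNodeDesc and hypothetical column names:

    import java.util.Arrays;
    import java.util.BitSet;
    import java.util.List;

    public class ConstraintPredicateSketch {
      // Fold per-column checks into one AND-ed predicate, mirroring the loop above.
      static String buildPredicate(List<String> cols, BitSet notNullPositions) {
        String predicate = null;
        for (int i = 0; i < cols.size(); i++) {
          if (!notNullPositions.get(i)) {
            continue; // column has no enabled NOT NULL constraint
          }
          String check = "enforce_constraint(isnotnull(" + cols.get(i) + "))";
          predicate = (predicate == null) ? check : "(" + predicate + " and " + check + ")";
        }
        return predicate; // null when the table has no enabled NOT NULL constraints
      }

      public static void main(String[] args) {
        List<String> cols = Arrays.asList("id", "name", "ssn");
        BitSet notNullPositions = new BitSet();
        notNullPositions.set(0);
        notNullPositions.set(2);
        // prints: (enforce_constraint(isnotnull(id)) and enforce_constraint(isnotnull(ssn)))
        System.out.println(buildPredicate(cols, notNullPositions));
      }
    }

Routing the check through enforce_constraint rather than a plain filter suggests that a failed check is meant to abort the statement instead of silently dropping the offending row; the exact runtime behavior of that UDF is presumably defined in another part of this patch.
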
   @SuppressWarnings("nls")
   protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
       throws SemanticException {
@@ -6712,6 +6837,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // this table_desc does not contain the partitioning columns
       table_desc = Utilities.getTableDesc(dest_tab);
 
+      // Add NOT NULL constraint check
+      input = genIsNotNullConstraint(dest, qb, input);
+
       // Add sorting/bucketing if needed
       input = genBucketingSortingDest(dest, input, qb, table_desc, dest_tab, rsCtx);
 
@@ -6736,7 +6864,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
         } else {
           txnId = acidOp == Operation.NOT_ACID ? null :
-            SessionState.get().getTxnMgr().getCurrentTxnId();
+              SessionState.get().getTxnMgr().getCurrentTxnId();
         }
         boolean isReplace = !qb.getParseInfo().isInsertIntoTable(
             dest_tab.getDbName(), dest_tab.getTableName());
@@ -6744,8 +6872,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
         // deltas and base and leave them up to the cleaner to clean up
         LoadFileType loadType = (!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
-                dest_tab.getTableName()) && !destTableIsTransactional)
-                ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
+            dest_tab.getTableName()) && !destTableIsTransactional)
+            ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
         ltd.setLoadFileType(loadType);
         ltd.setLbCtx(lbCtx);
         loadTableWork.add(ltd);
@@ -6755,7 +6883,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         setStatsForNonNativeTable(dest_tab);
         // true if it is insert overwrite.
         boolean overwrite = !qb.getParseInfo().isInsertIntoTable(
-                String.format("%s.%s", dest_tab.getDbName(), dest_tab.getTableName()));
+            String.format("%s.%s", dest_tab.getDbName(), dest_tab.getTableName()));
         createInsertDesc(dest_tab, overwrite);
       }
 
@@ -6791,6 +6919,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
       table_desc = Utilities.getTableDesc(dest_tab);
 
+      // Add NOT NULL constraint check
+      input = genIsNotNullConstraint(dest, qb, input);
+
       // Add sorting/bucketing if needed
       input = genBucketingSortingDest(dest, input, qb, table_desc, dest_tab, rsCtx);
 
@@ -6811,20 +6942,20 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
       } else {
         txnId = (acidOp == Operation.NOT_ACID) ? null :
-          SessionState.get().getTxnMgr().getCurrentTxnId();
+            SessionState.get().getTxnMgr().getCurrentTxnId();
       }
       ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, txnId);
       // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
       // deltas and base and leave them up to the cleaner to clean up
       LoadFileType loadType = (!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
-              dest_tab.getTableName()) && !destTableIsTransactional) // // Both Full-acid and MM tables are excluded.
-              ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
+          dest_tab.getTableName()) && !destTableIsTransactional) // // Both Full-acid and MM tables are excluded.
+          ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
       ltd.setLoadFileType(loadType);
       ltd.setLbCtx(lbCtx);
 
       loadTableWork.add(ltd);
       if (!outputs.add(new WriteEntity(dest_part,
-        determineWriteType(ltd, dest_tab.isNonNative(), dest)))) {
+          determineWriteType(ltd, dest_tab.isNonNative(), dest)))) {
 
         throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
             .getMsg(dest_tab.getTableName() + "@" + dest_part.getName()));
@@ -6872,7 +7003,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           queryTmpdir = isMmTable ? qPath : ctx.getTempDirForFinalJobPath(qPath);
           if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
             Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir
-                  + " from " + dest_path + " (" + isMmTable + ")");
+                + " from " + dest_path + " (" + isMmTable + ")");
           }
         } catch (Exception e) {
           throw new SemanticException("Error creating temporary folder on: "
@@ -6905,8 +7036,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats.
       loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, dest_path, isDfsDir, cols,
           colTypes,
-        destTableIsFullAcid ? //there is a change here - prev version had 'transactional', one before 'acid'
-            Operation.INSERT : Operation.NOT_ACID,
+          destTableIsFullAcid ? //there is a change here - prev version had 'transactional', one before 'acid'
+              Operation.INSERT : Operation.NOT_ACID,
           isMmCtas));
       if (tblDesc == null) {
         if (viewDesc != null) {
@@ -6914,23 +7045,23 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         } else if (qb.getIsQuery()) {
           String fileFormat;
           if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) {
-              fileFormat = "SequenceFile";
-              HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT, fileFormat);
-              table_desc=
-                         PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat,
-                           ThriftJDBCBinarySerDe.class);
-              // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
-              // write out formatted thrift objects to SequenceFile
-              conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
+            fileFormat = "SequenceFile";
+            HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT, fileFormat);
+            table_desc=
+                PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat,
+                    ThriftJDBCBinarySerDe.class);
+            // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
+            // write out formatted thrift objects to SequenceFile
+            conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
           } else {
-              fileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
-              Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
-              if (fileFormat.equals(PlanUtils.LLAP_OUTPUT_FORMAT_KEY)) {
-                serdeClass = LazyBinarySerDe2.class;
-              }
-              table_desc =
-                         PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat,
-                           serdeClass);
+            fileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
+            Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
+            if (fileFormat.equals(PlanUtils.LLAP_OUTPUT_FORMAT_KEY)) {
+              serdeClass = LazyBinarySerDe2.class;
+            }
+            table_desc =
+                PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat,
+                    serdeClass);
           }
         } else {
           table_desc = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes);
@@ -7009,12 +7140,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     if (SessionState.get().isHiveServerQuery() &&
-      null != table_desc &&
-      table_desc.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) &&
-      HiveConf.getBoolVar(conf,HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
-        fileSinkDesc.setIsUsingThriftJDBCBinarySerDe(true);
+        null != table_desc &&
+        table_desc.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) &&
+        HiveConf.getBoolVar(conf,HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
+      fileSinkDesc.setIsUsingThriftJDBCBinarySerDe(true);
     } else {
-        fileSinkDesc.setIsUsingThriftJDBCBinarySerDe(false);
+      fileSinkDesc.setIsUsingThriftJDBCBinarySerDe(false);
     }
 
     Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(
@@ -7072,7 +7203,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         String typeName = colInfo.getType().getTypeName();
         // CTAS should NOT create a VOID type
         if (typeName.equals(serdeConstants.VOID_TYPE_NAME)) {
-            throw new SemanticException(ErrorMsg.CTAS_CREATES_VOID_TYPE.getMsg(colName));
+          throw new SemanticException(ErrorMsg.CTAS_CREATES_VOID_TYPE.getMsg(colName));
         }
         col.setType(typeName);
         field_schemas.add(col);
@@ -7108,31 +7239,31 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private FileSinkDesc createFileSinkDesc(String dest, TableDesc table_desc,
-      Partition dest_part, Path dest_path, int currentTableId,
-      boolean destTableIsAcid, boolean destTableIsTemporary,
-      boolean destTableIsMaterialization, Path queryTmpdir,
-      SortBucketRSCtx rsCtx, DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx,
-      RowSchema fsRS, boolean canBeMerged, Table dest_tab, Long mmWriteId, boolean isMmCtas,
-      Integer dest_type, QB qb) throws SemanticException {
+                                          Partition dest_part, Path dest_path, int currentTableId,
+                                          boolean destTableIsAcid, boolean destTableIsTemporary,
+                                          boolean destTableIsMaterialization, Path queryTmpdir,
+                                          SortBucketRSCtx rsCtx, DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx,
+                                          RowSchema fsRS, boolean canBeMerged, Table dest_tab, Long mmWriteId, boolean isMmCtas,
+                                          Integer dest_type,

<TRUNCATED>

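A note on the genIsNotNullConstraint method added above: for each destination column whose bit is set in the NOT NULL bitset, it wraps isnotnull(column) in the new enforce_constraint UDF and ANDs the checks together into a single FilterDesc placed ahead of the file sink. The following standalone sketch is not part of the patch; it uses plain strings and java.util.BitSet in place of ExprNodeDesc and ImmutableBitSet purely to show the shape of the predicate built for the table1 example from the new q-file.

import java.util.Arrays;
import java.util.BitSet;
import java.util.List;

public class NotNullPredicateShapeSketch {
  public static void main(String[] args) {
    // table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED)
    List<String> columns = Arrays.asList("a", "b", "c");
    BitSet notNull = new BitSet();
    notNull.set(0); // column a
    notNull.set(2); // column c

    String predicate = null;
    for (int i = 0; i < columns.size(); i++) {
      if (!notNull.get(i)) {
        continue; // unconstrained columns contribute no check
      }
      String check = "enforce_constraint(isnotnull(" + columns.get(i) + "))";
      // mirrors the currUDF accumulation in genIsNotNullConstraint
      predicate = (predicate == null) ? check : "and(" + predicate + ", " + check + ")";
    }
    // prints: and(enforce_constraint(isnotnull(a)), enforce_constraint(isnotnull(c)))
    System.out.println(predicate);
  }
}

The explain plans in the new enforce_constraint_notnull.q.out should show a Filter Operator evaluating a predicate of roughly this shape just above the file sink.
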
[3/5] hive git commit: HIVE-16605: Enforce NOT NULL constraint (Vineet Garg, reviewed by Ashutosh Chauhan)

Posted by vg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 9fcde76..14217e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -701,7 +701,7 @@ public class TypeCheckProcFactory {
 
   }
 
-  private static ExprNodeDesc toExprNodeDesc(ColumnInfo colInfo) {
+  static ExprNodeDesc toExprNodeDesc(ColumnInfo colInfo) {
     ObjectInspector inspector = colInfo.getObjectInspector();
     if (inspector instanceof ConstantObjectInspector &&
         inspector instanceof PrimitiveObjectInspector) {

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
index e3a9e62..2accad3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
@@ -139,8 +139,10 @@ public final class OpProcFactory {
   }
 
   private static void removeOperator(Operator<? extends OperatorDesc> operator) {
-    List<Operator<? extends OperatorDesc>> children = operator.getChildOperators();
-    List<Operator<? extends OperatorDesc>> parents = operator.getParentOperators();
+    // since removeParent/removeChild update the childOperators and parentOperators lists in place,
+    // we need to make copies of the lists to iterate over them
+    List<Operator<? extends OperatorDesc>> children = new ArrayList<>(operator.getChildOperators());
+    List<Operator<? extends OperatorDesc>> parents = new ArrayList<>(operator.getParentOperators());
     for (Operator<? extends OperatorDesc> parent : parents) {
       parent.getChildOperators().addAll(children);
       parent.removeChild(operator);
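
The comment above is the heart of this fix: removeChild and removeParent mutate the very lists being walked, so iterating the live lists can skip operators or fail outright. The standalone sketch below is not from the patch and uses plain strings instead of Operator instances; it only illustrates the hazard that the defensive copies avoid.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.ConcurrentModificationException;
import java.util.List;

public class InPlaceRemovalSketch {
  public static void main(String[] args) {
    List<String> children = new ArrayList<>(Arrays.asList("FIL", "SEL", "FS"));

    // Removing from the list that is currently being iterated fails (or silently skips elements).
    try {
      for (String child : children) {
        children.remove(child); // stands in for removeChild/removeParent mutating the live list
      }
    } catch (ConcurrentModificationException e) {
      System.out.println("live list: " + e);
    }

    // Iterating a copy, as removeOperator now does, visits every remaining element safely.
    for (String child : new ArrayList<>(children)) {
      children.remove(child);
    }
    System.out.println("after copy-based removal: " + children); // []
  }
}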

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceNotNullConstraint.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceNotNullConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceNotNullConstraint.java
new file mode 100644
index 0000000..6c469bf
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceNotNullConstraint.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+
+/**
+ * GenericUDFEnforceNotNullConstraint: internal UDF that fails query execution
+ * when an enforced NOT NULL constraint is violated.
+ */
+@Description(name = "enforce_constraint",
+    value = "_FUNC_(x) - Internal UDF to enforce NOT NULL constraint",
+    extended = "For internal use only")
+public class GenericUDFEnforceNotNullConstraint extends GenericUDF {
+  private final BooleanWritable resultBool = new BooleanWritable();
+  private transient BooleanObjectInspector boi;
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length != 1) {
+      throw new UDFArgumentLengthException(
+          "Invalid number of arguments. The enforce_constraint UDF expects exactly one argument but received: "
+              + arguments.length);
+    }
+
+    boi = (BooleanObjectInspector) arguments[0];
+    return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+
+    Object a = arguments[0].get();
+    boolean result = boi.get(a);
+
+    if (!result) {
+      throw new UDFArgumentLengthException(
+          "NOT NULL constraint violated!");
+    }
+    resultBool.set(true);
+    return resultBool;
+  }
+
+  @Override
+  protected String getFuncName() {
+    return "enforce_constraint";
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    return getStandardDisplayString(getFuncName(), children);
+  }
+}
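
For readers who want to see the runtime behaviour of this UDF in isolation, here is a hedged, standalone driver (not part of the patch; the class name and the literal inputs are illustrative). A true argument, i.e. isnotnull(column) held, passes through and the UDF returns true; a false argument aborts with the same message the negative q.out files below assert.

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFEnforceNotNullConstraint;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.BooleanWritable;

public class EnforceConstraintUdfSketch {
  public static void main(String[] args) throws HiveException {
    GenericUDFEnforceNotNullConstraint udf = new GenericUDFEnforceNotNullConstraint();
    udf.initialize(new ObjectInspector[] {
        PrimitiveObjectInspectorFactory.writableBooleanObjectInspector });

    // isnotnull(col) evaluated to true: the row is allowed through.
    Object ok = udf.evaluate(new DeferredObject[] {
        new DeferredJavaObject(new BooleanWritable(true)) });
    System.out.println(ok); // true

    // isnotnull(col) evaluated to false: the query is aborted.
    try {
      udf.evaluate(new DeferredObject[] {
          new DeferredJavaObject(new BooleanWritable(false)) });
    } catch (HiveException e) {
      System.out.println(e.getMessage()); // NOT NULL constraint violated!
    }
  }
}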

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/alter_notnull_constraint_violation.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/alter_notnull_constraint_violation.q b/ql/src/test/queries/clientnegative/alter_notnull_constraint_violation.q
new file mode 100644
index 0000000..b09d90f
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/alter_notnull_constraint_violation.q
@@ -0,0 +1,5 @@
+CREATE TABLE t1(i int, j int);
+insert into t1 values(1,2);
+
+alter table t1 change j j int constraint nn0 not null enforced;
+insert into t1 values(2,null);

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/create_with_constraints_enable.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/create_with_constraints_enable.q b/ql/src/test/queries/clientnegative/create_with_constraints_enable.q
deleted file mode 100644
index 59ebb1e..0000000
--- a/ql/src/test/queries/clientnegative/create_with_constraints_enable.q
+++ /dev/null
@@ -1 +0,0 @@
-CREATE TABLE table1 (a STRING, b STRING, primary key (a) enable);

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/create_with_constraints_enforced.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/create_with_constraints_enforced.q b/ql/src/test/queries/clientnegative/create_with_constraints_enforced.q
deleted file mode 100644
index 5b90c0e..0000000
--- a/ql/src/test/queries/clientnegative/create_with_constraints_enforced.q
+++ /dev/null
@@ -1 +0,0 @@
-CREATE TABLE table1 (a STRING, b STRING, primary key (a) enforced);

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/create_with_fk_constraints_enforced.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/create_with_fk_constraints_enforced.q b/ql/src/test/queries/clientnegative/create_with_fk_constraints_enforced.q
new file mode 100644
index 0000000..73a3e59
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/create_with_fk_constraints_enforced.q
@@ -0,0 +1,3 @@
+CREATE TABLE table2 (a INT PRIMARY KEY DISABLE, b STRING);
+CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN KEY (b) REFERENCES table2(a) ENFORCED);
+

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/create_with_pk_constraints_enforced.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/create_with_pk_constraints_enforced.q b/ql/src/test/queries/clientnegative/create_with_pk_constraints_enforced.q
new file mode 100644
index 0000000..5b90c0e
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/create_with_pk_constraints_enforced.q
@@ -0,0 +1 @@
+CREATE TABLE table1 (a STRING, b STRING, primary key (a) enforced);

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/create_with_unique_constraints_enforced.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/create_with_unique_constraints_enforced.q b/ql/src/test/queries/clientnegative/create_with_unique_constraints_enforced.q
new file mode 100644
index 0000000..52ef89d
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/create_with_unique_constraints_enforced.q
@@ -0,0 +1 @@
+CREATE TABLE table1 (a STRING UNIQUE ENFORCED, b STRING);
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/insert_into_acid_notnull.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_into_acid_notnull.q b/ql/src/test/queries/clientnegative/insert_into_acid_notnull.q
new file mode 100644
index 0000000..da9aba3
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/insert_into_acid_notnull.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table acid_uami(i int,
+                 de decimal(5,2) not null enforced,
+                 vc varchar(128) not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+
+insert into table acid_uami select 1, null, null;

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/insert_into_notnull_constraint.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_into_notnull_constraint.q b/ql/src/test/queries/clientnegative/insert_into_notnull_constraint.q
new file mode 100644
index 0000000..da35b02
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/insert_into_notnull_constraint.q
@@ -0,0 +1,3 @@
+create table nullConstraintCheck(i int NOT NULL enforced, j int);
+insert into nullConstraintCheck values(null,2);
+

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/insert_multi_into_notnull.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_multi_into_notnull.q b/ql/src/test/queries/clientnegative/insert_multi_into_notnull.q
new file mode 100644
index 0000000..2c4ec1e
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/insert_multi_into_notnull.q
@@ -0,0 +1,6 @@
+create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING);
+create table src_multi2 (i STRING, j STRING NOT NULL ENFORCED);
+
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select key, null where key > 10 and key < 20;

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/insert_overwrite_notnull_constraint.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_overwrite_notnull_constraint.q b/ql/src/test/queries/clientnegative/insert_overwrite_notnull_constraint.q
new file mode 100644
index 0000000..64fe4e4
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/insert_overwrite_notnull_constraint.q
@@ -0,0 +1,3 @@
+create table nullConstraintCheck(i int NOT NULL enforced, j int);
+insert overwrite table nullConstraintCheck values(null,2);
+

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/merge_constraint_notnull.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/merge_constraint_notnull.q b/ql/src/test/queries/clientnegative/merge_constraint_notnull.q
new file mode 100644
index 0000000..8ba869b
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/merge_constraint_notnull.q
@@ -0,0 +1,17 @@
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table nonacid (key int, a1 string, value string) stored as orc;
+insert into nonacid values(1, null, 'value');
+
+create table testT (key int NOT NULL enable, a1 string NOT NULL enforced, value string)
+clustered by (value) into 2 buckets stored as orc
+tblproperties ("transactional"="true");
+insert into testT values(2,'a1masking', 'valuemasking');
+
+MERGE INTO testT as t using nonacid as s ON t.key = s.key
+WHEN MATCHED AND s.key > 5 THEN DELETE
+WHEN MATCHED AND s.key < 3 THEN UPDATE set a1 = '1'
+WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
+

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientnegative/update_notnull_constraint.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_notnull_constraint.q b/ql/src/test/queries/clientnegative/update_notnull_constraint.q
new file mode 100644
index 0000000..0ae8561
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/update_notnull_constraint.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table acid_uami(i int,
+                 de decimal(5,2) constraint nn1 not null enforced,
+                 vc varchar(128) constraint nn2 not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+insert into acid_uami values(1, 1.4, 'first');
+UPDATE acid_uami set de=null where i=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q b/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
new file mode 100644
index 0000000..b89bcb7
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
@@ -0,0 +1,196 @@
+--  SIMPLE TABLE
+-- create table with first and last column with not null
+CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED);
+
+-- insert value tuples
+explain INSERT INTO table1 values('not', 'null', 'constraint');
+INSERT INTO table1 values('not', 'null', 'constraint');
+SELECT * FROM table1;
+
+-- insert with column specified
+explain insert into table1(a,c) values('1','2');
+insert into table1(a,c) values('1','2');
+
+-- insert from select
+explain INSERT INTO table1 select key, src.value, value from src;
+INSERT INTO table1 select key, src.value, value from src;
+SELECT * FROM table1;
+
+-- insert overwrite
+explain INSERT OVERWRITE TABLE table1 select src.*, value from src;
+INSERT OVERWRITE TABLE table1 select src.*, value from src;
+SELECT * FROM table1;
+
+-- insert overwrite with if not exists
+explain INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src;
+INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src;
+SELECT * FROM table1;
+
+DROP TABLE table1;
+
+-- multi insert
+create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING);
+create table src_multi2 (i STRING, j STRING NOT NULL ENABLE);
+
+explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20;
+
+
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20;
+
+explain
+from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20;
+
+from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20;
+
+--  ACID TABLE
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+-- SORT_QUERY_RESULTS
+create table acid_uami(i int,
+                 de decimal(5,2) constraint nn1 not null enforced,
+                 vc varchar(128) constraint nn2 not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+-- insert into values
+explain insert into table acid_uami values
+    (1, 109.23, 'mary had a little lamb'),
+    (6553, 923.19, 'its fleece was white as snow');
+insert into table acid_uami values
+    (1, 109.23, 'mary had a little lamb'),
+    (6553, 923.19, 'its fleece was white as snow');
+select * from acid_uami;
+
+ --insert into select
+explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src;
+insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src;
+
+-- select with limit
+explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src limit 2;
+
+-- select with order by
+explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src
+        order by key limit 2;
+
+-- select with group by
+explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src
+        group by key, value order by key limit 2;
+
+ --overwrite
+explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src;
+insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src;
+
+-- update
+explain update acid_uami set de = 3.14 where de = 109.23 or de = 119.23;
+update acid_uami set de = 3.14 where de = 109.23 or de = 119.23;
+
+ALTER table acid_uami drop constraint nn1;
+ALTER table acid_uami CHANGE i i int constraint nn0 not null enforced;
+
+explain update acid_uami set de = 3.14159 where de = 3.14 ;
+update acid_uami set de = 3.14159 where de = 3.14 ;
+
+-- multi insert
+explain
+from src
+insert overwrite table acid_uami select cast(key as int), cast(key as decimal(5,2)), value where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20;
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+-- Table with partition
+CREATE TABLE tablePartitioned (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED)
+    PARTITIONED BY (p1 STRING, p2 INT NOT NULL ENABLE);
+
+-- Insert into
+explain INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint');
+INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint');
+
+-- Insert as select
+explain INSERT INTO tablePartitioned partition(p1, p2) select key, value, value, key as p1, 3 as p2 from src limit 10;
+INSERT INTO tablePartitioned partition(p1, p2) select key, value, value, key as p1, 3 as p2 from src limit 10;
+
+select * from tablePartitioned;
+
+-- multi insert
+explain
+from src
+INSERT INTO tablePartitioned partition(p1, p2) select key, value, value, 'yesterday' as p1, 3 as p2
+insert overwrite table src_multi2 select * where key > 10 and key < 20;
+
+DROP TABLE src_multi1;
+DROP TABLE src_multi2;
+DROP TABLE acid_uami;
+
+-- MERGE statements
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table nonacid (key int, a1 string, value string) stored as orc;
+
+create table masking_test (key int NOT NULL enable, a1 string, value string)
+clustered by (value) into 2 buckets stored as orc
+tblproperties ("transactional"="true");
+
+-- with cardinality check off
+set hive.merge.cardinality.check=false;
+explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+WHEN MATCHED AND s.key < 5 THEN DELETE
+WHEN MATCHED AND s.key < 3 THEN UPDATE set a1 = '1'
+WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
+
+-- with cardinality check on
+set hive.merge.cardinality.check=true;
+explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+WHEN MATCHED AND s.key < 5 THEN DELETE
+WHEN MATCHED AND s.key < 3 THEN UPDATE set a1 = '1'
+WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
+
+explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+WHEN MATCHED AND s.key < 5 THEN DELETE
+WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
+
+explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+WHEN MATCHED AND s.key < 3 THEN UPDATE set a1 = '1'
+WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
+
+-- shouldn't have constraint enforcement
+explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+WHEN MATCHED AND s.key < 5 THEN DELETE;
+
+DROP TABLE masking_test;
+DROP TABLE nonacid;
+
+-- Test drop constraint
+create table table2(i int constraint nn5 not null enforced, j int);
+explain insert into table2 values(2, 3);
+alter table table2 drop constraint nn5;
+explain insert into table2 values(2, 3);
+DROP TABLE table2;
+
+-- temporary table
+create temporary table tttemp(i int not null enforced);
+explain insert into tttemp values(1);
+explain insert into tttemp select cast(key as int) from src;
+drop table tttemp;
+
+-- micro-managed table
+set hive.create.as.insert.only=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+create table part_mm(key int not null enforced) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+explain insert into table part_mm partition(key_mm=455) select key from src order by value limit 3;
+insert into table part_mm partition(key_mm=455) select key from src order by value limit 3;
+select key from src order by value limit 3;
+select * from part_mm;
+drop table part_mm;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/alter_notnull_constraint_violation.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_notnull_constraint_violation.q.out b/ql/src/test/results/clientnegative/alter_notnull_constraint_violation.q.out
new file mode 100644
index 0000000..fe5f235
--- /dev/null
+++ b/ql/src/test/results/clientnegative/alter_notnull_constraint_violation.q.out
@@ -0,0 +1,27 @@
+PREHOOK: query: CREATE TABLE t1(i int, j int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: CREATE TABLE t1(i int, j int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: insert into t1 values(1,2)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@t1
+POSTHOOK: query: insert into t1 values(1,2)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.i SCRIPT []
+POSTHOOK: Lineage: t1.j SCRIPT []
+PREHOOK: query: alter table t1 change j j int constraint nn0 not null enforced
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: alter table t1 change j j int constraint nn0 not null enforced
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+FAILED: UDFArgumentException org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException: NOT NULL constraint violated!

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/create_with_constraints_enable.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/create_with_constraints_enable.q.out b/ql/src/test/results/clientnegative/create_with_constraints_enable.q.out
deleted file mode 100644
index 2b24412..0000000
--- a/ql/src/test/results/clientnegative/create_with_constraints_enable.q.out
+++ /dev/null
@@ -1 +0,0 @@
-FAILED: SemanticException [Error 10326]: Invalid Constraint syntax ENABLE/ENFORCED feature not supported yet. Please use DISABLE/NOT ENFORCED instead.

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/create_with_constraints_enforced.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/create_with_constraints_enforced.q.out b/ql/src/test/results/clientnegative/create_with_constraints_enforced.q.out
deleted file mode 100644
index 2b24412..0000000
--- a/ql/src/test/results/clientnegative/create_with_constraints_enforced.q.out
+++ /dev/null
@@ -1 +0,0 @@
-FAILED: SemanticException [Error 10326]: Invalid Constraint syntax ENABLE/ENFORCED feature not supported yet. Please use DISABLE/NOT ENFORCED instead.

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/create_with_fk_constraints_enforced.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/create_with_fk_constraints_enforced.q.out b/ql/src/test/results/clientnegative/create_with_fk_constraints_enforced.q.out
new file mode 100644
index 0000000..38dca87
--- /dev/null
+++ b/ql/src/test/results/clientnegative/create_with_fk_constraints_enforced.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: CREATE TABLE table2 (a INT PRIMARY KEY DISABLE, b STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table2
+POSTHOOK: query: CREATE TABLE table2 (a INT PRIMARY KEY DISABLE, b STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table2
+FAILED: SemanticException [Error 10325]: Invalid Foreign Key syntax ENABLE feature not supported yet. Please use DISABLE instead.

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/create_with_pk_constraints_enforced.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/create_with_pk_constraints_enforced.q.out b/ql/src/test/results/clientnegative/create_with_pk_constraints_enforced.q.out
new file mode 100644
index 0000000..2b24412
--- /dev/null
+++ b/ql/src/test/results/clientnegative/create_with_pk_constraints_enforced.q.out
@@ -0,0 +1 @@
+FAILED: SemanticException [Error 10326]: Invalid Constraint syntax ENABLE/ENFORCED feature not supported yet. Please use DISABLE/NOT ENFORCED instead.

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/create_with_unique_constraints_enforced.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/create_with_unique_constraints_enforced.q.out b/ql/src/test/results/clientnegative/create_with_unique_constraints_enforced.q.out
new file mode 100644
index 0000000..2b24412
--- /dev/null
+++ b/ql/src/test/results/clientnegative/create_with_unique_constraints_enforced.q.out
@@ -0,0 +1 @@
+FAILED: SemanticException [Error 10326]: Invalid Constraint syntax ENABLE/ENFORCED feature not supported yet. Please use DISABLE/NOT ENFORCED instead.

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/insert_into_acid_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_into_acid_notnull.q.out b/ql/src/test/results/clientnegative/insert_into_acid_notnull.q.out
new file mode 100644
index 0000000..721c7e7
--- /dev/null
+++ b/ql/src/test/results/clientnegative/insert_into_acid_notnull.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table acid_uami(i int,
+                 de decimal(5,2) not null enforced,
+                 vc varchar(128) not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_uami
+POSTHOOK: query: create table acid_uami(i int,
+                 de decimal(5,2) not null enforced,
+                 vc varchar(128) not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_uami
+FAILED: UDFArgumentException org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException: NOT NULL constraint violated!

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/insert_into_notnull_constraint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_into_notnull_constraint.q.out b/ql/src/test/results/clientnegative/insert_into_notnull_constraint.q.out
new file mode 100644
index 0000000..6718ae0
--- /dev/null
+++ b/ql/src/test/results/clientnegative/insert_into_notnull_constraint.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: create table nullConstraintCheck(i int NOT NULL enforced, j int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nullConstraintCheck
+POSTHOOK: query: create table nullConstraintCheck(i int NOT NULL enforced, j int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nullConstraintCheck
+FAILED: UDFArgumentException org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException: NOT NULL constraint violated!

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/insert_multi_into_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_multi_into_notnull.q.out b/ql/src/test/results/clientnegative/insert_multi_into_notnull.q.out
new file mode 100644
index 0000000..d54e6cf
--- /dev/null
+++ b/ql/src/test/results/clientnegative/insert_multi_into_notnull.q.out
@@ -0,0 +1,17 @@
+PREHOOK: query: create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: create table src_multi2 (i STRING, j STRING NOT NULL ENFORCED)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: create table src_multi2 (i STRING, j STRING NOT NULL ENFORCED)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi2
+FAILED: UDFArgumentException org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException: NOT NULL constraint violated!

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/insert_overwrite_notnull_constraint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_overwrite_notnull_constraint.q.out b/ql/src/test/results/clientnegative/insert_overwrite_notnull_constraint.q.out
new file mode 100644
index 0000000..6718ae0
--- /dev/null
+++ b/ql/src/test/results/clientnegative/insert_overwrite_notnull_constraint.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: create table nullConstraintCheck(i int NOT NULL enforced, j int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nullConstraintCheck
+POSTHOOK: query: create table nullConstraintCheck(i int NOT NULL enforced, j int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nullConstraintCheck
+FAILED: UDFArgumentException org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException: NOT NULL constraint violated!

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/merge_constraint_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/merge_constraint_notnull.q.out b/ql/src/test/results/clientnegative/merge_constraint_notnull.q.out
new file mode 100644
index 0000000..088e249
--- /dev/null
+++ b/ql/src/test/results/clientnegative/merge_constraint_notnull.q.out
@@ -0,0 +1,55 @@
+PREHOOK: query: create table nonacid (key int, a1 string, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nonacid
+POSTHOOK: query: create table nonacid (key int, a1 string, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nonacid
+PREHOOK: query: insert into nonacid values(1, null, 'value')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@nonacid
+POSTHOOK: query: insert into nonacid values(1, null, 'value')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@nonacid
+POSTHOOK: Lineage: nonacid.a1 EXPRESSION []
+POSTHOOK: Lineage: nonacid.key SCRIPT []
+POSTHOOK: Lineage: nonacid.value SCRIPT []
+PREHOOK: query: create table testT (key int NOT NULL enable, a1 string NOT NULL enforced, value string)
+clustered by (value) into 2 buckets stored as orc
+tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testT
+POSTHOOK: query: create table testT (key int NOT NULL enable, a1 string NOT NULL enforced, value string)
+clustered by (value) into 2 buckets stored as orc
+tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testT
+PREHOOK: query: insert into testT values(2,'a1masking', 'valuemasking')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testt
+POSTHOOK: query: insert into testT values(2,'a1masking', 'valuemasking')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testt
+POSTHOOK: Lineage: testt.a1 SCRIPT []
+POSTHOOK: Lineage: testt.key SCRIPT []
+POSTHOOK: Lineage: testt.value SCRIPT []
+PREHOOK: query: MERGE INTO testT as t using nonacid as s ON t.key = s.key
+WHEN MATCHED AND s.key > 5 THEN DELETE
+WHEN MATCHED AND s.key < 3 THEN UPDATE set a1 = '1'
+WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nonacid
+PREHOOK: Input: default@testt
+PREHOOK: Output: default@merge_tmp_table
+PREHOOK: Output: default@testt
+PREHOOK: Output: default@testt
+PREHOOK: Output: default@testt
+#### A masked pattern was here ####
+FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/test/results/clientnegative/update_notnull_constraint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/update_notnull_constraint.q.out b/ql/src/test/results/clientnegative/update_notnull_constraint.q.out
new file mode 100644
index 0000000..427ca4d
--- /dev/null
+++ b/ql/src/test/results/clientnegative/update_notnull_constraint.q.out
@@ -0,0 +1,24 @@
+PREHOOK: query: create table acid_uami(i int,
+                 de decimal(5,2) constraint nn1 not null enforced,
+                 vc varchar(128) constraint nn2 not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_uami
+POSTHOOK: query: create table acid_uami(i int,
+                 de decimal(5,2) constraint nn1 not null enforced,
+                 vc varchar(128) constraint nn2 not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_uami
+PREHOOK: query: insert into acid_uami values(1, 1.4, 'first')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@acid_uami
+POSTHOOK: query: insert into acid_uami values(1, 1.4, 'first')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@acid_uami
+POSTHOOK: Lineage: acid_uami.de SCRIPT []
+POSTHOOK: Lineage: acid_uami.i SCRIPT []
+POSTHOOK: Lineage: acid_uami.vc SCRIPT []
+FAILED: UDFArgumentException org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException: NOT NULL constraint violated!


[5/5] hive git commit: HIVE-16605: Enforce NOT NULL constraint (Vineet Garg, reviewed by Ashutosh Chauhan)

Posted by vg...@apache.org.
HIVE-16605: Enforce NOT NULL constraint (Vineet Garg, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c50ebb34
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c50ebb34
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c50ebb34

Branch: refs/heads/master
Commit: c50ebb34bf140956083ba805f9a1a26a795648d0
Parents: 31207ed
Author: Vineet Garg <vg...@apache.org>
Authored: Wed Feb 7 11:04:30 2018 -0800
Committer: Vineet Garg <vg...@apache.org>
Committed: Wed Feb 7 11:05:46 2018 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    3 +
 .../test/resources/testconfiguration.properties |    1 +
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |    1 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   37 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |   11 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 1199 ++--
 .../hive/ql/parse/TypeCheckProcFactory.java     |    2 +-
 .../hadoop/hive/ql/ppd/OpProcFactory.java       |    6 +-
 .../GenericUDFEnforceNotNullConstraint.java     |   76 +
 .../alter_notnull_constraint_violation.q        |    5 +
 .../create_with_constraints_enable.q            |    1 -
 .../create_with_constraints_enforced.q          |    1 -
 .../create_with_fk_constraints_enforced.q       |    3 +
 .../create_with_pk_constraints_enforced.q       |    1 +
 .../create_with_unique_constraints_enforced.q   |    1 +
 .../clientnegative/insert_into_acid_notnull.q   |    9 +
 .../insert_into_notnull_constraint.q            |    3 +
 .../clientnegative/insert_multi_into_notnull.q  |    6 +
 .../insert_overwrite_notnull_constraint.q       |    3 +
 .../clientnegative/merge_constraint_notnull.q   |   17 +
 .../clientnegative/update_notnull_constraint.q  |    9 +
 .../clientpositive/enforce_constraint_notnull.q |  196 +
 .../alter_notnull_constraint_violation.q.out    |   27 +
 .../create_with_constraints_enable.q.out        |    1 -
 .../create_with_constraints_enforced.q.out      |    1 -
 .../create_with_fk_constraints_enforced.q.out   |    9 +
 .../create_with_pk_constraints_enforced.q.out   |    1 +
 ...reate_with_unique_constraints_enforced.q.out |    1 +
 .../insert_into_acid_notnull.q.out              |   13 +
 .../insert_into_notnull_constraint.q.out        |    9 +
 .../insert_multi_into_notnull.q.out             |   17 +
 .../insert_overwrite_notnull_constraint.q.out   |    9 +
 .../merge_constraint_notnull.q.out              |   55 +
 .../update_notnull_constraint.q.out             |   24 +
 .../llap/enforce_constraint_notnull.q.out       | 6010 ++++++++++++++++++
 .../results/clientpositive/show_functions.q.out |    1 +
 36 files changed, 7224 insertions(+), 545 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index f3e74eb..26e08e4 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1580,6 +1580,9 @@ public class HiveConf extends Configuration {
         "not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
         "query will fail if hive.enforce.bucketmapjoin is set to true."),
 
+    HIVE_ENFORCE_NOT_NULL_CONSTRAINT("hive.constraint.notnull.enforce", true,
+        "Should the NOT NULL constraint be enforced?"),
+
     HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false,
         "Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
     HIVE_AUTO_SORTMERGE_JOIN_REDUCE("hive.auto.convert.sortmerge.join.reduce.side", true,
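
The new hive.constraint.notnull.enforce flag above defaults to true, so enforced NOT NULL constraints are honored out of the box and an admin can only opt out explicitly. A minimal sketch of reading it follows; the class and method names are hypothetical and only the ConfVars entry comes from this patch (the place where the planner actually consults the flag is not visible in this excerpt).

import org.apache.hadoop.hive.conf.HiveConf;

public class NotNullEnforcementGateSketch {
  // false only if hive.constraint.notnull.enforce=false was set explicitly
  static boolean shouldEnforceNotNull(HiveConf conf) {
    return HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ENFORCE_NOT_NULL_CONSTRAINT);
  }

  public static void main(String[] args) {
    System.out.println(shouldEnforceNotNull(new HiveConf())); // true by default
  }
}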

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 32b7551..9a76b85 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -541,6 +541,7 @@ minillaplocal.query.files=\
   dynpart_sort_opt_vectorization.q,\
   dynpart_sort_optimization.q,\
   dynpart_sort_optimization_acid.q,\
+  enforce_constraint_notnull.q,\
   escape1.q,\
   escape2.q,\
   exchgpartition2lel.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 76e8563..f7801bb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -214,6 +214,7 @@ public final class FunctionRegistry {
     system.registerUDF("rand", UDFRand.class, false);
     system.registerGenericUDF("abs", GenericUDFAbs.class);
     system.registerGenericUDF("sq_count_check", GenericUDFSQCountCheck.class);
+    system.registerGenericUDF("enforce_constraint", GenericUDFEnforceNotNullConstraint.class);
     system.registerGenericUDF("pmod", GenericUDFPosMod.class);
 
     system.registerUDF("ln", UDFLn.class, false);

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 07999e2..109f4c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -31,7 +31,17 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.ByteBuffer;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
@@ -40,6 +50,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 import javax.jdo.JDODataStoreException;
@@ -4810,6 +4821,30 @@ private void constructOneLBLocationMap(FileStatus fSta,
     return getNotNullConstraints(dbName, tblName, true);
   }
 
+  /**
+   * Get not null constraints associated with the table that are enabled/enforced.
+   *
+   * @param dbName Database Name
+   * @param tblName Table Name
+   * @return Not null constraints associated with the table.
+   * @throws HiveException
+   */
+  public NotNullConstraint getEnabledNotNullConstraints(String dbName, String tblName)
+      throws HiveException {
+    try {
+      List<SQLNotNullConstraint> notNullConstraints = getMSC().getNotNullConstraints(
+              new NotNullConstraintsRequest(dbName, tblName));
+      if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+        notNullConstraints = notNullConstraints.stream()
+          .filter(nnc -> nnc.isEnable_cstr())
+          .collect(Collectors.toList());
+      }
+      return new NotNullConstraint(notNullConstraints, tblName, dbName);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
   private NotNullConstraint getNotNullConstraints(String dbName, String tblName, boolean onlyReliable)
       throws HiveException {
     try {
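
A hedged usage sketch for the new helper above (not part of the patch; the database and table names are illustrative and error handling is elided): it returns only the constraints whose enable flag is set, which is what the analyzer needs when deciding whether to add the enforce_constraint filter.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;

public class EnabledConstraintLookupSketch {
  public static void main(String[] args) throws HiveException {
    Hive db = Hive.get(new HiveConf());
    // Disabled NOT NULL constraints are filtered out before the object is returned.
    NotNullConstraint enabled = db.getEnabledNotNullConstraints("default", "table1");
    System.out.println(enabled);
  }
}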

http://git-wip-us.apache.org/repos/asf/hive/blob/c50ebb34/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 372cfad..e553a81 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -778,8 +778,10 @@ public abstract class BaseSemanticAnalyzer {
         constraintName = unescapeIdentifier(grandChild.getChild(0).getText().toLowerCase());
       } else if (type == HiveParser.TOK_ENABLE) {
         enable = true;
-        // validate is true by default if we enable the constraint
-        validate = true;
+        // validate is false by default if we enable the constraint
+        // TODO: A constraint like NOT NULL could be enabled using ALTER but VALIDATE remains
+        //  false in such cases. Ideally VALIDATE should be set to true to validate existing data
+        validate = false;
       } else if (type == HiveParser.TOK_DISABLE) {
         enable = false;
         // validate is false by default if we disable the constraint
@@ -792,7 +794,10 @@ public abstract class BaseSemanticAnalyzer {
         rely = true;
       }
     }
-    if (enable) {
+
+    // only NOT NULL constraints may be ENABLEd/ENFORCEd for now; other constraint types must use DISABLE/NOT ENFORCED
+    if (child.getToken().getType() != HiveParser.TOK_NOT_NULL
+        && enable) {
       throw new SemanticException(
           ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("ENABLE/ENFORCED feature not supported yet. "
               + "Please use DISABLE/NOT ENFORCED instead."));