Posted to commits@hive.apache.org by mm...@apache.org on 2017/01/18 08:39:28 UTC

hive git commit: HIVE-15627: Make hive.vectorized.adaptor.usage.mode=all vectorize all UDFs not just those in supportedGenericUDFs (Matt McCline, reviewed by Gopal Vijayaraghavan)

Repository: hive
Updated Branches:
  refs/heads/master 640fc55da -> ffeb6c87c


HIVE-15627: Make hive.vectorized.adaptor.usage.mode=all vectorize all UDFs not just those in supportedGenericUDFs (Matt McCline, reviewed by Gopal Vijayaraghavan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ffeb6c87
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ffeb6c87
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ffeb6c87

Branch: refs/heads/master
Commit: ffeb6c87c793a70e030108d5ee951e63cd5fd67d
Parents: 640fc55
Author: Matt McCline <mm...@hortonworks.com>
Authored: Wed Jan 18 00:39:16 2017 -0800
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Wed Jan 18 00:39:16 2017 -0800

----------------------------------------------------------------------
 .../ql/exec/vector/VectorizationContext.java    |   2 +-
 .../ql/exec/vector/udf/VectorUDFAdaptor.java    |  10 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |  27 +-
 .../test/queries/clientpositive/vector_udf1.q   |  57 ++-
 .../results/clientpositive/llap/orc_llap.q.out  |  16 +-
 .../clientpositive/llap/vector_join30.q.out     |   8 +-
 .../llap/vector_number_compare_projection.q.out |   4 +-
 .../clientpositive/llap/vector_udf1.q.out       | 470 +++++++++++++++----
 8 files changed, 474 insertions(+), 120 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ffeb6c87/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 7f91227..c887757 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -168,7 +168,7 @@ public class VectorizationContext {
   // private final Map<String, Integer> columnMap;
   private int firstOutputColumnIndex;
 
-  private enum HiveVectorAdaptorUsageMode {
+  public enum HiveVectorAdaptorUsageMode {
     NONE,
     CHOSEN,
     ALL;
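
The enum above goes from private to public so that the Vectorizer (see the change further down) can import VectorizationContext.HiveVectorAdaptorUsageMode and ask for the configured mode via getHiveConfValue(hiveConf). As a rough standalone illustration of that lookup (the real method lives in VectorizationContext and reads a HiveConf rather than the Properties object used here, and the fallback value below is only for the sketch, not Hive's actual default):

import java.util.Locale;
import java.util.Properties;

// Standalone stand-in for VectorizationContext.HiveVectorAdaptorUsageMode.
enum AdaptorUsageModeSketch {
  NONE,     // never wrap UDFs with the VectorUDFAdaptor
  CHOSEN,   // wrap only UDFs listed in supportedGenericUDFs
  ALL;      // wrap every UDF that has no native vectorized implementation

  // Mirrors the getHiveConfValue(hiveConf) call used by the Vectorizer; a plain
  // Properties object stands in for HiveConf, and the "none" fallback is only
  // for this sketch.
  static AdaptorUsageModeSketch getHiveConfValue(Properties conf) {
    String s = conf.getProperty("hive.vectorized.adaptor.usage.mode", "none");
    return valueOf(s.trim().toUpperCase(Locale.ROOT));
  }

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty("hive.vectorized.adaptor.usage.mode", "all");
    System.out.println(getHiveConfValue(conf));   // ALL
  }
}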

http://git-wip-us.apache.org/repos/asf/hive/blob/ffeb6c87/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
index 20cfb89..51e2d78 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;
 import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableBinaryObjectInspector;
+import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
 /**
@@ -330,8 +332,14 @@ public class VectorUDFAdaptor extends VectorExpression {
         HiveDecimal hd = ((WritableHiveDecimalObjectInspector) outputOI).getPrimitiveJavaObject(value);
         dcv.set(i, hd);
       }
+    } else if (outputOI instanceof WritableBinaryObjectInspector) {
+      BytesWritable bw = (BytesWritable) value;
+      BytesColumnVector bv = (BytesColumnVector) colVec;
+      bv.setVal(i, bw.getBytes(), 0, bw.getLength());
     } else {
-      throw new RuntimeException("Unhandled object type " + outputOI.getTypeName());
+      throw new RuntimeException("Unhandled object type " + outputOI.getTypeName() +
+          " inspector class " + outputOI.getClass().getName() +
+          " value class " + value.getClass().getName());
     }
   }
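
With the binary branch above, a UDF whose writable output is a BytesWritable is now copied into a BytesColumnVector via setVal(i, bytes, 0, length). A small self-contained sketch of that copy (SimpleBytesColumn below is a made-up stand-in for BytesColumnVector; the point is that only the logical length of the writable's backing buffer is taken, since a BytesWritable buffer can be larger than getLength()):

import java.util.Arrays;

// Made-up, minimal stand-in for BytesColumnVector: one byte[] per row.
class SimpleBytesColumn {
  final byte[][] vector = new byte[1024][];

  // Copies the [start, start + length) slice, like BytesColumnVector.setVal.
  void setVal(int row, byte[] src, int start, int length) {
    vector[row] = Arrays.copyOfRange(src, start, start + length);
  }
}

class BinaryCopySketch {
  public static void main(String[] args) {
    // A padded buffer standing in for BytesWritable.getBytes(); only the first
    // three bytes are the logical value, as BytesWritable.getLength() would report.
    byte[] backing = {0x01, 0x02, 0x03, 0x00, 0x00};
    int logicalLength = 3;

    SimpleBytesColumn col = new SimpleBytesColumn();
    col.setVal(0, backing, 0, logicalLength);
    System.out.println(Arrays.toString(col.vector[0]));  // [1, 2, 3]
  }
}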
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ffeb6c87/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 2a99274..814fe6c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOuterFilteredOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSMBMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext.HiveVectorAdaptorUsageMode;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext.InConstantType;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.IdentityExpression;
@@ -240,8 +241,16 @@ public class Vectorizer implements PhysicalPlanResolver {
 
   boolean isSchemaEvolution;
 
+  HiveVectorAdaptorUsageMode hiveVectorAdaptorUsageMode;
+
   public Vectorizer() {
 
+    /*
+     * We check UDFs against the supportedGenericUDFs when
+     * hive.vectorized.adaptor.usage.mode=chosen or none.
+     *
+     * We allow all UDFs for hive.vectorized.adaptor.usage.mode=all.
+     */
     supportedGenericUDFs.add(GenericUDFOPPlus.class);
     supportedGenericUDFs.add(GenericUDFOPMinus.class);
     supportedGenericUDFs.add(GenericUDFOPMultiply.class);
@@ -1394,6 +1403,8 @@ public class Vectorizer implements PhysicalPlanResolver {
         HiveConf.getBoolVar(hiveConf,
             HiveConf.ConfVars.HIVE_SCHEMA_EVOLUTION);
 
+    hiveVectorAdaptorUsageMode = HiveVectorAdaptorUsageMode.getHiveConfValue(hiveConf);
+
     // create dispatcher and graph walker
     Dispatcher disp = new VectorizationDispatcher(physicalContext);
     TaskGraphWalker ogw = new TaskGraphWalker(disp);
@@ -1900,13 +1911,17 @@ public class Vectorizer implements PhysicalPlanResolver {
     if (VectorizationContext.isCustomUDF(genericUDFExpr)) {
       return true;
     }
-    GenericUDF genericUDF = genericUDFExpr.getGenericUDF();
-    if (genericUDF instanceof GenericUDFBridge) {
-      Class<? extends UDF> udf = ((GenericUDFBridge) genericUDF).getUdfClass();
-      return supportedGenericUDFs.contains(udf);
-    } else {
-      return supportedGenericUDFs.contains(genericUDF.getClass());
+    if (hiveVectorAdaptorUsageMode == HiveVectorAdaptorUsageMode.NONE ||
+        hiveVectorAdaptorUsageMode == HiveVectorAdaptorUsageMode.CHOSEN) {
+      GenericUDF genericUDF = genericUDFExpr.getGenericUDF();
+      if (genericUDF instanceof GenericUDFBridge) {
+        Class<? extends UDF> udf = ((GenericUDFBridge) genericUDF).getUdfClass();
+        return supportedGenericUDFs.contains(udf);
+      } else {
+        return supportedGenericUDFs.contains(genericUDF.getClass());
+      }
     }
+    return true;
   }
 
   private boolean validateAggregationIsPrimitive(VectorAggregateExpression vectorAggrExpr) {
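
Taken together, the Vectorizer now reads the adaptor usage mode from HiveConf before walking the plan, and only consults the supportedGenericUDFs whitelist when the mode is NONE or CHOSEN; in ALL mode every UDF passes validation and is left for the VectorUDFAdaptor to wrap. A compact sketch of just that decision, with the Hive types (GenericUDF, GenericUDFBridge, HiveConf) replaced by plain stand-ins and the GenericUDFBridge unwrapping omitted:

import java.util.HashSet;
import java.util.Set;

class UdfValidationSketch {
  enum UsageMode { NONE, CHOSEN, ALL }

  // Stand-in for Vectorizer.supportedGenericUDFs (normally populated in the constructor).
  private final Set<Class<?>> supportedGenericUDFs = new HashSet<>();
  private final UsageMode usageMode;

  UdfValidationSketch(UsageMode usageMode) {
    this.usageMode = usageMode;
  }

  // Mirrors the new control flow: NONE/CHOSEN keep the whitelist check,
  // ALL accepts everything so the VectorUDFAdaptor can handle it.
  boolean validateGenericUdf(Class<?> udfClass) {
    if (usageMode == UsageMode.NONE || usageMode == UsageMode.CHOSEN) {
      return supportedGenericUDFs.contains(udfClass);
    }
    return true;
  }

  public static void main(String[] args) {
    // String.class is just a placeholder for some UDF class not in the whitelist.
    System.out.println(new UdfValidationSketch(UsageMode.CHOSEN).validateGenericUdf(String.class)); // false
    System.out.println(new UdfValidationSketch(UsageMode.ALL).validateGenericUdf(String.class));    // true
  }
}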

http://git-wip-us.apache.org/repos/asf/hive/blob/ffeb6c87/ql/src/test/queries/clientpositive/vector_udf1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_udf1.q b/ql/src/test/queries/clientpositive/vector_udf1.q
index 2fcc69b..6ebe58f 100644
--- a/ql/src/test/queries/clientpositive/vector_udf1.q
+++ b/ql/src/test/queries/clientpositive/vector_udf1.q
@@ -3,9 +3,10 @@ set hive.fetch.task.conversion=none;
 
 drop table varchar_udf_1;
 
-create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC;
+create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20),
+     d1 string, d2 string, d3 varchar(10), d4 varchar(10)) STORED AS ORC;
 insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1;
+  select key, value, key, value, '2015-01-14', '2015-01-14', '2017-01-11', '2017-01-11' from src where key = '238' limit 1;
 
 -- UDFs with varchar support
 explain
@@ -102,6 +103,58 @@ from varchar_udf_1 limit 1;
 
 explain
 select
+  replace(c1, '_', c2),
+  replace(c3, '_', c4),
+  replace(c1, '_', c2) = replace(c3, '_', c4)
+from varchar_udf_1 limit 1;
+
+select
+  replace(c1, '_', c2),
+  replace(c3, '_', c4),
+  replace(c1, '_', c2) = replace(c3, '_', c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1;
+
+select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  next_day(d1, 'TU'),
+  next_day(d4, 'WE'),
+  next_day(d1, 'TU') = next_day(d4, 'WE')
+from varchar_udf_1 limit 1;
+
+select
+  next_day(d1, 'TU'),
+  next_day(d4, 'WE'),
+  next_day(d1, 'TU') = next_day(d4, 'WE')
+from varchar_udf_1 limit 1;
+
+explain
+select
+  months_between(d1, d3),
+  months_between(d2, d4),
+  months_between(d1, d3) = months_between(d2, d4)
+from varchar_udf_1 limit 1;
+
+select
+  months_between(d1, d3),
+  months_between(d2, d4),
+  months_between(d1, d3) = months_between(d2, d4)
+from varchar_udf_1 limit 1;
+
+explain
+select
   length(c2),
   length(c4),
   length(c2) = length(c4)

http://git-wip-us.apache.org/repos/asf/hive/blob/ffeb6c87/ql/src/test/results/clientpositive/llap/orc_llap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_llap.q.out b/ql/src/test/results/clientpositive/llap/orc_llap.q.out
index 74a6b29..abe2a26 100644
--- a/ql/src/test/results/clientpositive/llap/orc_llap.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_llap.q.out
@@ -278,7 +278,7 @@ STAGE PLANS:
                           sort order: 
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
             Execution mode: vectorized, llap
@@ -351,7 +351,7 @@ STAGE PLANS:
                           sort order: 
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
             Execution mode: vectorized, llap
@@ -424,7 +424,7 @@ STAGE PLANS:
                           sort order: 
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
             Execution mode: vectorized, llap
@@ -500,7 +500,7 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -745,7 +745,7 @@ STAGE PLANS:
                           sort order: 
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
             Execution mode: vectorized, llap
@@ -818,7 +818,7 @@ STAGE PLANS:
                           sort order: 
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
             Execution mode: vectorized, llap
@@ -891,7 +891,7 @@ STAGE PLANS:
                           sort order: 
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
             Execution mode: vectorized, llap
@@ -967,7 +967,7 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/ffeb6c87/ql/src/test/results/clientpositive/llap/vector_join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join30.q.out b/ql/src/test/results/clientpositive/llap/vector_join30.q.out
index bb6916b..1a1c26a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_join30.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_join30.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
@@ -223,7 +223,7 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
@@ -376,7 +376,7 @@ STAGE PLANS:
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
@@ -536,7 +536,7 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/ffeb6c87/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
index 3b8c0dc..2a41a8c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
@@ -144,7 +144,7 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: hash(KEY.reducesinkkey0,KEY.reducesinkkey1,KEY.reducesinkkey2,VALUE._col0,VALUE._col1,VALUE._col2) (type: int)
@@ -223,7 +223,7 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: hash(KEY.reducesinkkey0,KEY.reducesinkkey1,KEY.reducesinkkey2,KEY.reducesinkkey3,VALUE._col0,VALUE._col1,VALUE._col2,VALUE._col3) (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/ffeb6c87/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_udf1.q.out b/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
index 9450b8f..b1a37bd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
@@ -2,21 +2,23 @@ PREHOOK: query: drop table varchar_udf_1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table varchar_udf_1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC
+PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20),
+     d1 string, d2 string, d3 varchar(10), d4 varchar(10)) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC
+POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20),
+     d1 string, d2 string, d3 varchar(10), d4 varchar(10)) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_udf_1
 PREHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
+  select key, value, key, value, '2015-01-14', '2015-01-14', '2017-01-11', '2017-01-11' from src where key = '238' limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@varchar_udf_1
 POSTHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
+  select key, value, key, value, '2015-01-14', '2015-01-14', '2017-01-11', '2017-01-11' from src where key = '238' limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@varchar_udf_1
@@ -24,6 +26,10 @@ POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:
 POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.d1 SIMPLE []
+POSTHOOK: Lineage: varchar_udf_1.d2 SIMPLE []
+POSTHOOK: Lineage: varchar_udf_1.d3 EXPRESSION []
+POSTHOOK: Lineage: varchar_udf_1.d4 EXPRESSION []
 PREHOOK: query: -- UDFs with varchar support
 explain
 select 
@@ -53,17 +59,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: concat(c1, c2) (type: string), concat(c3, c4) (type: varchar(30)), (concat(c1, c2) = UDFToString(concat(c3, c4))) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -121,17 +127,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: upper(c2) (type: string), upper(c4) (type: varchar(20)), (upper(c2) = UDFToString(upper(c4))) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -189,17 +195,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: lower(c2) (type: string), lower(c4) (type: varchar(20)), (lower(c2) = UDFToString(lower(c4))) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -259,22 +265,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ascii(c2) (type: int), ascii(c4) (type: int), (ascii(c2) = ascii(c4)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
 
   Stage: Stage-0
@@ -327,22 +333,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: concat_ws('|', c1, c2) (type: string), concat_ws('|', c3, c4) (type: string), (concat_ws('|', c1, c2) = concat_ws('|', c3, c4)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
 
   Stage: Stage-0
@@ -395,22 +401,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: decode(encode(c2,'US-ASCII'),'US-ASCII') (type: string), decode(encode(c4,'US-ASCII'),'US-ASCII') (type: string), (decode(encode(c2,'US-ASCII'),'US-ASCII') = decode(encode(c4,'US-ASCII'),'US-ASCII')) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
 
   Stage: Stage-0
@@ -463,22 +469,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: instr(c2, '_') (type: int), instr(c4, '_') (type: int), (instr(c2, '_') = instr(c4, '_')) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
 
   Stage: Stage-0
@@ -506,6 +512,278 @@ POSTHOOK: Input: default@varchar_udf_1
 4	4	true
 PREHOOK: query: explain
 select
+  replace(c1, '_', c2),
+  replace(c3, '_', c4),
+  replace(c1, '_', c2) = replace(c3, '_', c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  replace(c1, '_', c2),
+  replace(c3, '_', c4),
+  replace(c1, '_', c2) = replace(c3, '_', c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: replace(c1, '_', c2) (type: string), replace(c3, '_', c4) (type: string), (replace(c1, '_', c2) = replace(c3, '_', c4)) (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  replace(c1, '_', c2),
+  replace(c3, '_', c4),
+  replace(c1, '_', c2) = replace(c3, '_', c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  replace(c1, '_', c2),
+  replace(c3, '_', c4),
+  replace(c1, '_', c2) = replace(c3, '_', c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238	238	true
+PREHOOK: query: explain
+select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: reverse(c2) (type: string), reverse(c4) (type: string), (reverse(c2) = reverse(c4)) (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+832_lav	832_lav	true
+PREHOOK: query: explain
+select
+  next_day(d1, 'TU'),
+  next_day(d4, 'WE'),
+  next_day(d1, 'TU') = next_day(d4, 'WE')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  next_day(d1, 'TU'),
+  next_day(d4, 'WE'),
+  next_day(d1, 'TU') = next_day(d4, 'WE')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: next_day(d1, 'TU') (type: string), next_day(d4, 'WE') (type: string), (next_day(d1, 'TU') = next_day(d4, 'WE')) (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  next_day(d1, 'TU'),
+  next_day(d4, 'WE'),
+  next_day(d1, 'TU') = next_day(d4, 'WE')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  next_day(d1, 'TU'),
+  next_day(d4, 'WE'),
+  next_day(d1, 'TU') = next_day(d4, 'WE')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+2015-01-20	2017-01-18	false
+PREHOOK: query: explain
+select
+  months_between(d1, d3),
+  months_between(d2, d4),
+  months_between(d1, d3) = months_between(d2, d4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  months_between(d1, d3),
+  months_between(d2, d4),
+  months_between(d1, d3) = months_between(d2, d4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: months_between(d1, d3) (type: double), months_between(d2, d4) (type: double), (months_between(d1, d3) = months_between(d2, d4)) (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  months_between(d1, d3),
+  months_between(d2, d4),
+  months_between(d1, d3) = months_between(d2, d4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  months_between(d1, d3),
+  months_between(d2, d4),
+  months_between(d1, d3) = months_between(d2, d4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+-23.90322581	-23.90322581	true
+PREHOOK: query: explain
+select
   length(c2),
   length(c4),
   length(c2) = length(c4)
@@ -531,17 +809,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: length(c2) (type: int), length(c4) (type: int), (length(c2) = length(c4)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -599,7 +877,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: 5 (type: int), 5 (type: int), true (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
@@ -667,22 +945,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: lpad(c2, 15, ' ') (type: string), lpad(c4, 15, ' ') (type: string), (lpad(c2, 15, ' ') = lpad(c4, 15, ' ')) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
 
   Stage: Stage-0
@@ -735,17 +1013,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ltrim(c2) (type: string), ltrim(c4) (type: string), (ltrim(c2) = ltrim(c4)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -803,17 +1081,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: c2 regexp 'val' (type: boolean), c4 regexp 'val' (type: boolean), (c2 regexp 'val' = c4 regexp 'val') (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -871,17 +1149,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: regexp_extract(c2, 'val_([0-9]+)', 1) (type: string), regexp_extract(c4, 'val_([0-9]+)', 1) (type: string), (regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -939,17 +1217,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: regexp_replace(c2, 'val', 'replaced') (type: string), regexp_replace(c4, 'val', 'replaced') (type: string), (regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1007,22 +1285,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: reverse(c2) (type: string), reverse(c4) (type: string), (reverse(c2) = reverse(c4)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
 
   Stage: Stage-0
@@ -1075,22 +1353,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: rpad(c2, 15, ' ') (type: string), rpad(c4, 15, ' ') (type: string), (rpad(c2, 15, ' ') = rpad(c4, 15, ' ')) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
 
   Stage: Stage-0
@@ -1143,17 +1421,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: rtrim(c2) (type: string), rtrim(c4) (type: string), (rtrim(c2) = rtrim(c4)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1209,7 +1487,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: sentences('See spot run.  See jane run.') (type: array<array<string>>), sentences('See spot run.  See jane run.') (type: array<array<string>>)
                     outputColumnNames: _col0, _col1
@@ -1273,17 +1551,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: split(c2, '_') (type: array<string>), split(c4, '_') (type: array<string>)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1337,7 +1615,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: str_to_map('a:1,b:2,c:3',',',':') (type: map<string,string>), str_to_map('a:1,b:2,c:3',',',':') (type: map<string,string>)
                     outputColumnNames: _col0, _col1
@@ -1403,17 +1681,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: substr(c2, 1, 3) (type: string), substr(c4, 1, 3) (type: string), (substr(c2, 1, 3) = substr(c4, 1, 3)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1471,17 +1749,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: trim(c2) (type: string), trim(c4) (type: string), (trim(c2) = trim(c4)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1542,11 +1820,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: c2 (type: string), c4 (type: varchar(20))
                     outputColumnNames: _col0, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: compute_stats(_col0, 16), compute_stats(_col2, 16)
                       mode: hash
@@ -1623,11 +1901,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: c2 (type: string), c4 (type: varchar(20))
                     outputColumnNames: c2, c4
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(c2), min(c4)
                       mode: hash
@@ -1704,11 +1982,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: c2 (type: string), c4 (type: varchar(20))
                     outputColumnNames: c2, c4
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 732 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(c2), max(c4)
                       mode: hash