Posted to commits@hive.apache.org by pr...@apache.org on 2015/05/12 03:23:50 UTC

[04/39] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)

HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/25310407
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/25310407
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/25310407

Branch: refs/heads/llap
Commit: 2531040758e796c5cc469a893c50a8f5a388ded6
Parents: 632a309
Author: Jason Dere <jd...@hortonworks.com>
Authored: Wed May 6 16:19:59 2015 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Wed May 6 16:19:59 2015 -0700

----------------------------------------------------------------------
 data/files/tjoin1.txt                           |     3 +
 data/files/tjoin2.txt                           |     4 +
 .../test/resources/testconfiguration.properties |     2 +
 .../exec/vector/VectorMapJoinBaseOperator.java  |   185 +
 .../ql/exec/vector/VectorMapJoinOperator.java   |   129 +-
 .../VectorMapJoinOuterFilteredOperator.java     |   120 +
 .../VectorMapJoinGenerateResultOperator.java    |     5 +
 .../hive/ql/optimizer/physical/Vectorizer.java  |    23 +-
 .../clientpositive/vector_left_outer_join2.q    |    62 +
 .../clientpositive/vector_leftsemi_mapjoin.q    |   403 +
 .../tez/vector_left_outer_join2.q.out           |   553 +
 .../tez/vector_leftsemi_mapjoin.q.out           | 13807 +++++++++++++++++
 .../vector_left_outer_join2.q.out               |   568 +
 .../vector_leftsemi_mapjoin.q.out               | 13572 ++++++++++++++++
 14 files changed, 29317 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/data/files/tjoin1.txt
----------------------------------------------------------------------
diff --git a/data/files/tjoin1.txt b/data/files/tjoin1.txt
new file mode 100644
index 0000000..897e0c5
--- /dev/null
+++ b/data/files/tjoin1.txt
@@ -0,0 +1,3 @@
+0|10|15
+1|20|25
+2|\N|50
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/data/files/tjoin2.txt
----------------------------------------------------------------------
diff --git a/data/files/tjoin2.txt b/data/files/tjoin2.txt
new file mode 100644
index 0000000..24820e9
--- /dev/null
+++ b/data/files/tjoin2.txt
@@ -0,0 +1,4 @@
+0|10|BB
+1|15|DD
+2|\N|EE
+3|10|FF
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 8e9984a..134fded 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -222,6 +222,8 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   vector_interval_1.q,\
   vector_interval_2.q,\
   vector_left_outer_join.q,\
+  vector_left_outer_join2.q,\
+  vector_leftsemi_mapjoin.q,\
   vector_mapjoin_reduce.q,\
   vector_mr_diff_schema_alias.q,\
   vector_multi_insert.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
new file mode 100644
index 0000000..0baec2c
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
+import org.apache.hadoop.hive.ql.exec.persistence.ObjectContainer;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+/**
+ * The *NON-NATIVE* base vector map join operator class used by VectorMapJoinOperator and
+ * VectorMapJoinOuterFilteredOperator.
+ *
+ * It has common variables and code for the output batch, Hybrid Grace spill batch, and more.
+ */
+public class VectorMapJoinBaseOperator extends MapJoinOperator implements VectorizationContextRegion {
+
+  private static final Log LOG = LogFactory.getLog(VectorMapJoinBaseOperator.class.getName());
+
+  private static final long serialVersionUID = 1L;
+
+  protected VectorizationContext vOutContext;
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  protected transient VectorizedRowBatch outputBatch;
+  protected transient VectorizedRowBatch scratchBatch;  // holds restored (from disk) big table rows
+
+  protected transient Map<ObjectInspector, VectorAssignRowSameBatch> outputVectorAssignRowMap;
+
+  protected transient VectorizedRowBatchCtx vrbCtx = null;
+
+  protected transient int tag;  // big table alias
+
+  public VectorMapJoinBaseOperator() {
+    super();
+  }
+
+  public VectorMapJoinBaseOperator (VectorizationContext vContext, OperatorDesc conf)
+    throws HiveException {
+    super();
+
+    MapJoinDesc desc = (MapJoinDesc) conf;
+    this.conf = desc;
+
+    order = desc.getTagOrder();
+    numAliases = desc.getExprs().size();
+    posBigTable = (byte) desc.getPosBigTable();
+    filterMaps = desc.getFilterMap();
+    noOuterJoin = desc.isNoOuterJoin();
+
+    // We are making a new output vectorized row batch.
+    vOutContext = new VectorizationContext(getName(), desc.getOutputColumnNames());
+  }
+
+  @Override
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+
+    Collection<Future<?>> result = super.initializeOp(hconf);
+
+    vrbCtx = new VectorizedRowBatchCtx();
+    vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector);
+
+    outputBatch = vrbCtx.createVectorizedRowBatch();
+
+    outputVectorAssignRowMap = new HashMap<ObjectInspector, VectorAssignRowSameBatch>();
+
+    return result;
+  }
+
+  /**
+   * 'forwards' the (row-mode) record into the (vectorized) output batch
+   */
+  @Override
+  protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException {
+    Object[] values = (Object[]) row;
+    VectorAssignRowSameBatch va = outputVectorAssignRowMap.get(outputOI);
+    if (va == null) {
+      va = new VectorAssignRowSameBatch();
+      va.init((StructObjectInspector) outputOI, vOutContext.getProjectedColumns());
+      va.setOneBatch(outputBatch);
+      outputVectorAssignRowMap.put(outputOI, va);
+    }
+
+    va.assignRow(outputBatch.size, values);
+
+    ++outputBatch.size;
+    if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
+      flushOutput();
+    }
+  }
+
+  private void flushOutput() throws HiveException {
+    forward(outputBatch, null);
+    outputBatch.reset();
+  }
+
+  @Override
+  public void closeOp(boolean aborted) throws HiveException {
+    super.closeOp(aborted);
+    for (MapJoinTableContainer tableContainer : mapJoinTables) {
+      if (tableContainer != null) {
+        tableContainer.dumpMetrics();
+      }
+    }
+    if (!aborted && 0 < outputBatch.size) {
+      flushOutput();
+    }
+  }
+
+  /**
+   * Forms vectorized row batches from the rows fed back by the super MapJoinOperator.
+   */
+  @Override
+  protected void reProcessBigTable(int partitionId)
+      throws HiveException {
+
+    if (scratchBatch == null) {
+      // The process method was not called -- no big table rows.
+      return;
+    }
+
+    HybridHashTableContainer.HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
+    ObjectContainer bigTable = partition.getMatchfileObjContainer();
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    while (bigTable.hasNext()) {
+      Object row = bigTable.next();
+      VectorizedBatchUtil.addProjectedRowToBatchFrom(row,
+          (StructObjectInspector) inputObjInspectors[posBigTable],
+          scratchBatch.size, scratchBatch, dataOutputBuffer);
+      scratchBatch.size++;
+
+      if (scratchBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
+        process(scratchBatch, tag); // call process once we have a full batch
+        scratchBatch.reset();
+        dataOutputBuffer.reset();
+      }
+    }
+    // Process the final row batch, which has fewer than DEFAULT_SIZE rows
+    if (scratchBatch.size > 0) {
+      process(scratchBatch, tag);
+      scratchBatch.reset();
+      dataOutputBuffer.reset();
+    }
+    bigTable.clear();
+  }
+
+  @Override
+  public VectorizationContext getOuputVectorizationContext() {
+    return vOutContext;
+  }
+}
\ No newline at end of file
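
For orientation, the operator above buffers row-mode join output back into a vectorized batch and forwards the batch downstream once it is full; closeOp() flushes whatever remains in a partially filled batch. Below is a minimal standalone sketch of that batch-and-flush pattern (illustrative only, not part of this commit; the RowBatcher name and the Consumer-based downstream are assumptions standing in for VectorizedRowBatch and Operator.forward):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

/** Accumulates row-mode records and forwards them downstream in fixed-size batches. */
final class RowBatcher<T> {
  static final int DEFAULT_SIZE = 1024;        // mirrors VectorizedRowBatch.DEFAULT_SIZE
  private final List<T> batch = new ArrayList<>(DEFAULT_SIZE);
  private final Consumer<List<T>> downstream;  // stands in for Operator.forward()

  RowBatcher(Consumer<List<T>> downstream) {
    this.downstream = downstream;
  }

  /** Analogous to internalForward(): add one row, flush when the batch is full. */
  void add(T row) {
    batch.add(row);
    if (batch.size() == DEFAULT_SIZE) {
      flush();
    }
  }

  /** Analogous to flushOutput() and the final flush in closeOp(). */
  void flush() {
    if (!batch.isEmpty()) {
      downstream.accept(new ArrayList<>(batch));
      batch.clear();
    }
  }
}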

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
index 0547346..15c747e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
@@ -18,10 +18,8 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
-import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Future;
@@ -31,11 +29,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
-import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor;
-import org.apache.hadoop.hive.ql.exec.persistence.ObjectContainer;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
@@ -45,36 +40,28 @@ import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.io.DataOutputBuffer;
 
 /**
  * The vectorized version of the MapJoinOperator.
  */
-public class VectorMapJoinOperator extends MapJoinOperator implements VectorizationContextRegion {
+public class VectorMapJoinOperator extends VectorMapJoinBaseOperator {
 
-  private static final Log LOG = LogFactory.getLog(
-      VectorMapJoinOperator.class.getName());
-
-   /**
-   *
-   */
   private static final long serialVersionUID = 1L;
 
-  private VectorExpression[] keyExpressions;
+  private static final Log LOG = LogFactory.getLog(
+      VectorMapJoinOperator.class.getName());
 
-  private VectorExpression[] bigTableFilterExpressions;
-  private VectorExpression[] bigTableValueExpressions;
+  protected VectorExpression[] keyExpressions;
 
-  private VectorizationContext vOutContext;
+  protected VectorExpression[] bigTableFilterExpressions;
+  protected VectorExpression[] bigTableValueExpressions;
 
   // The above members are initialized by the constructor and must not be
   // transient.
   //---------------------------------------------------------------------------
 
-  private transient VectorizedRowBatch outputBatch;
-  private transient VectorizedRowBatch scratchBatch;  // holds restored (from disk) big table rows
+
   private transient VectorExpressionWriter[] valueWriters;
-  private transient Map<ObjectInspector, VectorAssignRowSameBatch> outputVectorAssignRowMap;
 
   // These members are used as out-of-band params
   // for the inner-loop supper.processOp callbacks
@@ -84,9 +71,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
   private transient VectorHashKeyWrapperBatch keyWrapperBatch;
   private transient VectorExpressionWriter[] keyOutputWriters;
 
-  private transient VectorizedRowBatchCtx vrbCtx = null;
-
-  private transient int tag;  // big table alias
   private VectorExpressionWriter[] rowWriters;  // Writer for producing row from input batch
   protected transient Object[] singleRow;
 
@@ -97,16 +81,10 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
 
   public VectorMapJoinOperator (VectorizationContext vContext, OperatorDesc conf)
     throws HiveException {
-    this();
 
-    MapJoinDesc desc = (MapJoinDesc) conf;
-    this.conf = desc;
+    super(vContext, conf);
 
-    order = desc.getTagOrder();
-    numAliases = desc.getExprs().size();
-    posBigTable = (byte) desc.getPosBigTable();
-    filterMaps = desc.getFilterMap();
-    noOuterJoin = desc.isNoOuterJoin();
+    MapJoinDesc desc = (MapJoinDesc) conf;
 
     Map<Byte, List<ExprNodeDesc>> filterExpressions = desc.getFilters();
     bigTableFilterExpressions = vContext.getVectorExpressions(filterExpressions.get(posBigTable),
@@ -118,9 +96,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     // We're only going to evaluate the big table vectorized expressions,
     Map<Byte, List<ExprNodeDesc>> exprs = desc.getExprs();
     bigTableValueExpressions = vContext.getVectorExpressions(exprs.get(posBigTable));
-
-    // We are making a new output vectorized row batch.
-    vOutContext = new VectorizationContext(getName(), desc.getOutputColumnNames());
   }
 
   @Override
@@ -144,12 +119,7 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     List<ExprNodeDesc> keyDesc = conf.getKeys().get(posBigTable);
     keyOutputWriters = VectorExpressionWriterFactory.getExpressionWriters(keyDesc);
 
-    vrbCtx = new VectorizedRowBatchCtx();
-    vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector);
-
-    outputBatch = vrbCtx.createVectorizedRowBatch();
-
-    keyWrapperBatch =VectorHashKeyWrapperBatch.compileKeyWrapperBatch(keyExpressions);
+    keyWrapperBatch = VectorHashKeyWrapperBatch.compileKeyWrapperBatch(keyExpressions);
 
     Map<Byte, List<ExprNodeDesc>> valueExpressions = conf.getExprs();
     List<ExprNodeDesc> bigTableExpressions = valueExpressions.get(posBigTable);
@@ -203,51 +173,9 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     // Filtering is handled in the input batch processing
     filterMaps[posBigTable] = null;
 
-    outputVectorAssignRowMap = new HashMap<ObjectInspector, VectorAssignRowSameBatch>();
-
     return result;
   }
 
-  /**
-   * 'forwards' the (row-mode) record into the (vectorized) output batch
-   */
-  @Override
-  protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException {
-    Object[] values = (Object[]) row;
-    VectorAssignRowSameBatch va = outputVectorAssignRowMap.get(outputOI);
-    if (va == null) {
-      va = new VectorAssignRowSameBatch();
-      va.init((StructObjectInspector) outputOI, vOutContext.getProjectedColumns());
-      va.setOneBatch(outputBatch);
-      outputVectorAssignRowMap.put(outputOI, va);
-    }
-
-    va.assignRow(outputBatch.size, values);
-
-    ++outputBatch.size;
-    if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
-      flushOutput();
-    }
-  }
-
-  private void flushOutput() throws HiveException {
-    forward(outputBatch, null);
-    outputBatch.reset();
-  }
-
-  @Override
-  public void closeOp(boolean aborted) throws HiveException {
-    super.closeOp(aborted);
-    for (MapJoinTableContainer tableContainer : mapJoinTables) {
-      if (tableContainer != null) {
-        tableContainer.dumpMetrics();
-      }
-    }
-    if (!aborted && 0 < outputBatch.size) {
-      flushOutput();
-    }
-  }
-
   @Override
   protected JoinUtil.JoinResult setMapJoinKey(ReusableGetAdaptor dest, Object row, byte alias)
       throws HiveException {
@@ -256,7 +184,7 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
 
   @Override
   public void process(Object row, int tag) throws HiveException {
-    byte alias = (byte) tag;
+
     VectorizedRowBatch inBatch = (VectorizedRowBatch) row;
 
     // Preparation for hybrid grace hash join
@@ -297,11 +225,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
   }
 
   @Override
-  public VectorizationContext getOuputVectorizationContext() {
-    return vOutContext;
-  }
-
-  @Override
   protected void spillBigTableRow(MapJoinTableContainer hybridHtContainer, Object row)
       throws HiveException {
     // Extract the actual row from row batch
@@ -310,36 +233,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     super.spillBigTableRow(hybridHtContainer, actualRow);
   }
 
-  @Override
-  protected void reProcessBigTable(int partitionId)
-      throws HiveException {
-
-    HybridHashTableContainer.HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
-    ObjectContainer bigTable = partition.getMatchfileObjContainer();
-
-    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
-    while (bigTable.hasNext()) {
-      Object row = bigTable.next();
-      VectorizedBatchUtil.addProjectedRowToBatchFrom(row,
-          (StructObjectInspector) inputObjInspectors[posBigTable],
-          scratchBatch.size, scratchBatch, dataOutputBuffer);
-      scratchBatch.size++;
-
-      if (scratchBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
-        process(scratchBatch, tag); // call process once we have a full batch
-        scratchBatch.reset();
-        dataOutputBuffer.reset();
-      }
-    }
-    // Process the row batch that has less than DEFAULT_SIZE rows
-    if (scratchBatch.size > 0) {
-      process(scratchBatch, tag);
-      scratchBatch.reset();
-      dataOutputBuffer.reset();
-    }
-    bigTable.clear();
-  }
-
   // Code borrowed from VectorReduceSinkOperator
   private Object[] getRowObject(VectorizedRowBatch vrb, int rowIndex) throws HiveException {
     int batchIndex = rowIndex;

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
new file mode 100644
index 0000000..5aecfcc
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import java.util.Collection;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+
+/**
+ * This is the *NON-NATIVE* vector map join operator used only for the filtered LEFT OUTER JOIN case.
+ *
+ * It passes rows through one at a time so that the super MapJoinOperator can apply the outer join filtering properly.
+ *
+ */
+public class VectorMapJoinOuterFilteredOperator extends VectorMapJoinBaseOperator {
+
+  private static final long serialVersionUID = 1L;
+
+  private VectorizationContext vContext;
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  private transient boolean firstBatch;
+
+  private transient VectorExtractRowDynBatch vectorExtractRowDynBatch;
+
+  protected transient Object[] singleRow;
+
+  public VectorMapJoinOuterFilteredOperator() {
+    super();
+  }
+
+  public VectorMapJoinOuterFilteredOperator(VectorizationContext vContext, OperatorDesc conf)
+      throws HiveException {
+    super(vContext, conf);
+
+    this.vContext = vContext;
+  }
+
+  @Override
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+
+    // We need an input object inspector for the rows we will extract from the
+    // vectorized row batch, not, for example, the original inspector for an ORC table.
+    inputObjInspectors[0] =
+        VectorizedBatchUtil.convertToStandardStructObjectInspector((StructObjectInspector) inputObjInspectors[0]);
+
+    // Call the superclass initializeOp (VectorMapJoinBaseOperator, and through it MapJoinOperator)
+    // with the new input inspector.
+    Collection<Future<?>> result = super.initializeOp(hconf);
+
+    firstBatch = true;
+
+    return result;
+  }
+
+  @Override
+  public void process(Object data, int tag) throws HiveException {
+
+    VectorizedRowBatch batch = (VectorizedRowBatch) data;
+
+    // Preparation for hybrid grace hash join
+    this.tag = tag;
+    if (scratchBatch == null) {
+      scratchBatch = VectorizedBatchUtil.makeLike(batch);
+    }
+
+    if (firstBatch) {
+      vectorExtractRowDynBatch = new VectorExtractRowDynBatch();
+      vectorExtractRowDynBatch.init((StructObjectInspector) inputObjInspectors[0], vContext.getProjectedColumns());
+
+      singleRow = new Object[vectorExtractRowDynBatch.getCount()];
+
+      firstBatch = false;
+    }
+
+
+    vectorExtractRowDynBatch.setBatchOnEntry(batch);
+
+    // VectorizedBatchUtil.debugDisplayBatch( batch, "VectorReduceSinkOperator processOp ");
+
+    if (batch.selectedInUse) {
+      int selected[] = batch.selected;
+      for (int logical = 0 ; logical < batch.size; logical++) {
+        int batchIndex = selected[logical];
+        vectorExtractRowDynBatch.extractRow(batchIndex, singleRow);
+        super.process(singleRow, tag);
+      }
+    } else {
+      for (int batchIndex = 0 ; batchIndex < batch.size; batchIndex++) {
+        vectorExtractRowDynBatch.extractRow(batchIndex, singleRow);
+        super.process(singleRow, tag);
+      }
+    }
+
+    vectorExtractRowDynBatch.forgetBatchOnExit();
+  }
+}
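
The operator above stays deliberately row-by-row: each live row is extracted from the incoming batch (honoring the selected[] index vector when selectedInUse is set) and handed to the row-mode MapJoinOperator, which can then apply the outer-join filter correctly. A rough standalone sketch of that loop follows (illustrative only; the Batch and RowSink types are simplified stand-ins, not Hive classes):

final class BatchPassThrough {

  /** Simplified stand-in for VectorizedRowBatch's selection-vector semantics. */
  static final class Batch {
    int size;               // number of logical rows in the batch
    boolean selectedInUse;  // when true, only rows listed in selected[] are live
    int[] selected;         // logical index -> physical row index
    Object[][] rows;        // physical row storage (stand-in for column vectors)
  }

  interface RowSink {
    void process(Object[] row);
  }

  /** Mirrors the shape of process() above: extract each live row and pass it to the row-mode join. */
  static void forwardRowByRow(Batch batch, RowSink rowModeJoin) {
    if (batch.selectedInUse) {
      for (int logical = 0; logical < batch.size; logical++) {
        int batchIndex = batch.selected[logical];
        rowModeJoin.process(batch.rows[batchIndex]);
      }
    } else {
      for (int batchIndex = 0; batchIndex < batch.size; batchIndex++) {
        rowModeJoin.process(batch.rows[batchIndex]);
      }
    }
  }
}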

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
index 0f1c7a8..860ebb5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
@@ -523,6 +523,11 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
 
     LOG.info(CLASS_NAME + " reProcessBigTable enter...");
 
+    if (spillReplayBatch == null) {
+      // The process method was not called -- no big table rows.
+      return;
+    }
+
     HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
 
     int rowCount = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 64d7c3e..096239e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -54,6 +54,9 @@ import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiString
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterLongOperator;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterMultiKeyOperator;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterStringOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOuterFilteredOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorSMBMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
@@ -1596,7 +1599,25 @@ public class Vectorizer implements PhysicalPlanResolver {
           boolean specialize = canSpecializeMapJoin(op, desc, isTez);
 
           if (!specialize) {
-            vectorOp = OperatorFactory.getVectorOperator(desc, vContext);
+
+            Class<? extends Operator<?>> opClass = null;
+            if (op instanceof MapJoinOperator) {
+
+              // *NON-NATIVE* vector map join: filtered LEFT OUTER JOIN needs a different operator class...
+
+              List<ExprNodeDesc> bigTableFilters = desc.getFilters().get((byte) desc.getPosBigTable());
+              boolean isOuterAndFiltered = (!desc.isNoOuterJoin() && bigTableFilters.size() > 0);
+              if (!isOuterAndFiltered) {
+                opClass = VectorMapJoinOperator.class;
+              } else {
+                opClass = VectorMapJoinOuterFilteredOperator.class;
+              }
+            } else if (op instanceof SMBMapJoinOperator) {
+              opClass = VectorSMBMapJoinOperator.class;
+            }
+
+            vectorOp = OperatorFactory.getVectorOperator(opClass, op.getConf(), vContext);
+
           } else {
 
             // TEMPORARY Until Native Vector Map Join with Hybrid passes tests...
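
The hunk above picks the non-native operator class from two properties of the join: whether it is an outer join and whether the big table carries filter expressions; only a filtered outer join takes the pass-through VectorMapJoinOuterFilteredOperator path. A small standalone sketch of that decision (illustrative only; MapJoinOpChooser and its enum are hypothetical, not Hive APIs):

final class MapJoinOpChooser {

  enum OpClass { VECTOR_MAP_JOIN, VECTOR_MAP_JOIN_OUTER_FILTERED, VECTOR_SMB_MAP_JOIN }

  /**
   * Same shape as the Vectorizer logic: a non-specialized map join that is an outer join
   * and has a filter on the big table falls back to the pass-through operator so the
   * row-mode MapJoinOperator can apply the outer-join filtering.
   */
  static OpClass choose(boolean isSmbJoin, boolean noOuterJoin, int bigTableFilterCount) {
    if (isSmbJoin) {
      return OpClass.VECTOR_SMB_MAP_JOIN;
    }
    boolean isOuterAndFiltered = !noOuterJoin && bigTableFilterCount > 0;
    return isOuterAndFiltered ? OpClass.VECTOR_MAP_JOIN_OUTER_FILTERED
                              : OpClass.VECTOR_MAP_JOIN;
  }
}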

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_left_outer_join2.q b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
new file mode 100644
index 0000000..098d002
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
@@ -0,0 +1,62 @@
+set hive.fetch.task.conversion=none;
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+drop table if exists TJOIN1;
+drop table if exists TJOIN2;
+create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc;
+create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc;
+create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE ;
+create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE ;
+LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE;
+LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE;
+INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE;
+INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE;
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
new file mode 100644
index 0000000..522ab12
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
@@ -0,0 +1,403 @@
+set hive.fetch.task.conversion=none;
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+-- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10;
+
+select * from t1 sort by key;
+
+create table t2 stored as orc as select cast(2*key as int) key, value from t1;
+
+select * from t2 sort by key;
+
+create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b;
+select * from t3 sort by key, value;
+
+create table t4 (key int, value string) stored as orc;
+select * from t4;
+
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
\ No newline at end of file
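
For orientation, the .q block above re-runs the same left semi join queries under several execution modes by flipping hive.vectorized.execution.enabled, hive.mapjoin.hybridgrace.hashtable and hive.vectorized.execution.mapjoin.native.enabled between passes, so the golden output can be compared across modes. A minimal sketch of that pattern follows; the table names and data are illustrative only and are not part of the committed test files.

  -- Illustrative sketch only: a tiny left semi join probed first with
  -- vectorized execution disabled and then with the native vectorized
  -- map join enabled, so the two result sets can be compared.
  -- demo_fact and demo_dim are hypothetical tables, not part of this commit.
  create table demo_fact (key int, value string) stored as orc;
  create table demo_dim  (key int) stored as orc;
  insert into demo_fact values (1, 'a'), (2, 'b'), (3, 'c');
  insert into demo_dim  values (1), (3);

  set hive.vectorized.execution.enabled=false;
  select * from demo_fact a left semi join demo_dim b on a.key = b.key sort by a.key;

  set hive.vectorized.execution.enabled=true;
  set hive.mapjoin.hybridgrace.hashtable=true;
  set hive.vectorized.execution.mapjoin.native.enabled=true;
  select * from demo_fact a left semi join demo_dim b on a.key = b.key sort by a.key;

  -- A left semi join emits each left row at most once when a match exists,
  -- so both passes should return keys 1 and 3 exactly once each.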

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
new file mode 100644
index 0000000..929194e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
@@ -0,0 +1,553 @@
+PREHOOK: query: drop table if exists TJOIN1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists TJOIN2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1
+POSTHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1
+PREHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2
+POSTHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2
+PREHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1STAGE
+POSTHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1STAGE
+PREHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2STAGE
+POSTHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2STAGE
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin1stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin1stage
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin2stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin2stage
+PREHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1stage
+PREHOOK: Output: default@tjoin1
+POSTHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1stage
+POSTHOOK: Output: default@tjoin1
+POSTHOOK: Lineage: tjoin1.c1 SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.c2 EXPRESSION [(tjoin1stage)tjoin1stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin1.rnum SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin2stage
+PREHOOK: Output: default@tjoin2
+POSTHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin2stage
+POSTHOOK: Output: default@tjoin2
+POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin2.c2 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin2.rnum SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+0	10	15	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+0	10	15	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+2	NULL	50	NULL
+0	10	15	BB
+0	10	15	FF
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+2	NULL	50	NULL
+1	20	25	NULL
+0	10	15	BB
+0	10	15	FF
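
The plans above carry the left-table condition as a residual filter inside the map join (filter predicates: 0 {(c2 > 15)}) rather than as a row filter, which matches standard left outer join semantics for an ON-clause predicate on the preserved side: such a predicate only suppresses matches, it never drops left rows. A small, self-contained sketch of that behaviour follows; the tables and values are illustrative only and are not part of the committed golden files.

  -- Illustrative sketch only; demo_left and demo_right are hypothetical
  -- tables, not part of this commit.
  -- In a left outer join, a predicate on the left table inside the ON clause
  -- only suppresses matches; left rows that fail it still appear,
  -- NULL-extended on the right.
  create table demo_left  (rnum int, k int, amount int) stored as orc;
  create table demo_right (k int, tag char(2)) stored as orc;
  insert into demo_left  values (0, 10, 15), (1, 20, 25);
  insert into demo_right values (10, 'BB'), (20, 'CC');

  select l.rnum, l.k, l.amount, r.tag
  from demo_left l
  left outer join demo_right r
    on (l.k = r.k and l.amount > 15);

  -- Expected: (0, 10, 15, NULL) because amount = 15 fails the ON-clause
  -- filter and suppresses the match to k = 10, and (1, 20, 25, 'CC')
  -- because row 1 satisfies both conditions.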