Posted to commits@hive.apache.org by kr...@apache.org on 2022/05/10 11:46:26 UTC

[hive] branch master updated: HIVE-25969: Unable to reference table column named default (Krisztian Kasa, reviewed by Zoltan Haindrich)

This is an automated email from the ASF dual-hosted git repository.

krisztiankasa pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 62834fbdd3 HIVE-25969: Unable to reference table column named default (Krisztian Kasa, reviewed by Zoltan Haindrich)
62834fbdd3 is described below

commit 62834fbdd3bd4065413b59448759c6a25aa1dbf0
Author: Krisztian Kasa <ka...@gmail.com>
AuthorDate: Tue May 10 13:46:13 2022 +0200

    HIVE-25969: Unable to reference table column named default (Krisztian Kasa, reviewed by Zoltan Haindrich)
---
 .../apache/hadoop/hive/ql/parse/FromClauseParser.g |   7 ++
 .../org/apache/hadoop/hive/ql/parse/HiveParser.g   |   9 +-
 .../hadoop/hive/ql/parse/IdentifiersParser.g       |  14 ++-
 .../hadoop/hive/ql/parse/TestParseDefault.java     | 108 ++++++++++++++++++++
 .../hive/ql/parse/MergeSemanticAnalyzer.java       |  49 ++++-----
 .../hive/ql/parse/RewriteSemanticAnalyzer.java     |   4 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java     |  25 +++--
 .../hadoop/hive/ql/parse/UnparseTranslator.java    |  13 +++
 .../clientpositive/insert_into_default_keyword_2.q |  15 +++
 .../llap/insert_into_default_keyword_2.q.out       | 113 +++++++++++++++++++++
 10 files changed, 311 insertions(+), 46 deletions(-)

diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
index 34a91fbe0e..dbad0f7f33 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
@@ -62,6 +62,13 @@ tableOrColumn
     identifier -> ^(TOK_TABLE_OR_COL identifier)
     ;
 
+defaultValue
+@init { gParent.pushMsg("default value", state); }
+@after { gParent.popMsg(state); }
+    :
+    KW_DEFAULT -> ^(TOK_TABLE_OR_COL TOK_DEFAULT_VALUE)
+    ;
+
 expressionList
 @init { gParent.pushMsg("expression list", state); }
 @after { gParent.popMsg(state); }
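The new defaultValue rule is the core of the fix: instead of letting the DEFAULT
keyword be swallowed as a plain Identifier (indistinguishable from a column that
happens to be named `default`), the parser now emits a dedicated TOK_DEFAULT_VALUE
marker under TOK_TABLE_OR_COL, so later phases can branch on node type instead of
on token text. A minimal sketch of the resulting distinction, using the same
ParseDriver API exercised by the new TestParseDefault below (the table name t1 is
illustrative):

    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.ParseDriver;

    ParseDriver pd = new ParseDriver();

    // The bare keyword now produces the marker token...
    ASTNode keyword = pd.parse("UPDATE t1 SET a = DEFAULT", null).getTree();
    // keyword.toStringTree() contains "(tok_table_or_col tok_default_value)"

    // ...while a backquoted identifier stays an ordinary column reference.
    ASTNode column = pd.parse("UPDATE t1 SET a = `default`", null).getTree();
    // column.toStringTree() contains no "tok_default_value" at all
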
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index ce63d0bc63..538d0a7127 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -2777,9 +2777,16 @@ deleteStatement
 /*SET <columName> = (3 + col2)*/
 columnAssignmentClause
    :
-   tableOrColumn EQUAL^ precedencePlusExpression
+   tableOrColumn EQUAL^ precedencePlusExpressionOrDefault
    ;
 
+precedencePlusExpressionOrDefault
+    :
+    (KW_DEFAULT (~DOT|EOF)) => defaultValue
+    | precedencePlusExpression
+    ;
+
+
 /*SET col1 = 5, col2 = (4 + col4), ...*/
 setColumnsClause
    :
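The syntactic predicate (KW_DEFAULT (~DOT|EOF)) => is what keeps this change
backward compatible: DEFAULT is only taken as the keyword when the next token is
not a dot (or when input ends), so qualified names such as default.field0 (a
struct field access, or a table in the `default` database) still parse through
the ordinary expression path. A sketch in the style of the new tests, with
static org.junit.Assert imports assumed:

    ParseDriver pd = new ParseDriver();

    // Keyword position: nothing but the end of the assignment follows.
    assertTrue(pd.parse("update t1 set b = default", null).getTree()
        .toStringTree().contains("tok_default_value"));

    // Qualified name: the dot routes it down the expression alternative.
    assertFalse(pd.parse("update t1 set b = default.field0", null).getTree()
        .toStringTree().contains("tok_default_value"));
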
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 31f437d63b..4d807a7ef6 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -160,7 +160,7 @@ expressionsInParenthesis[boolean isStruct, boolean forceStruct]
 
 expressionsNotInParenthesis[boolean isStruct, boolean forceStruct]
     :
-    first=expression more=expressionPart[$expression.tree, isStruct]?
+    first=expressionOrDefault more=expressionPart[$expressionOrDefault.tree, isStruct]?
     -> {forceStruct && more==null}?
        ^(TOK_FUNCTION Identifier["struct"] {$first.tree})
     -> {more==null}?
@@ -170,9 +170,15 @@ expressionsNotInParenthesis[boolean isStruct, boolean forceStruct]
 
 expressionPart[CommonTree firstExprTree, boolean isStruct]
     :
-    (COMMA expression)+
-    -> {isStruct}? ^(TOK_FUNCTION Identifier["struct"] {$firstExprTree} expression+)
-    -> {$firstExprTree} expression+
+    (COMMA expressionOrDefault)+
+    -> {isStruct}? ^(TOK_FUNCTION Identifier["struct"] {$firstExprTree} expressionOrDefault+)
+    -> {$firstExprTree} expressionOrDefault+
+    ;
+
+expressionOrDefault
+    :
+    (KW_DEFAULT ~DOT) => defaultValue
+    | expression
     ;
 
 // Parses comma separated list of expressions with optionally specified aliases.
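expressionOrDefault plays the same role inside VALUES lists: each element may be
either a full expression or the bare DEFAULT keyword, with the (KW_DEFAULT ~DOT)
predicate again deferring to expression when DEFAULT is followed by a dot. The
EOF alternative from HiveParser.g is absent here, presumably because an element
of a parenthesised list can never be the final token of a statement. At the
parser level the rule now accepts mixed rows such as (query is illustrative):

    // DEFAULT mixes freely with ordinary expressions in a single VALUES row.
    ASTNode tree = new ParseDriver()
        .parse("INSERT INTO TABLE t1 VALUES (1 + 2, DEFAULT, 'x')", null)
        .getTree();
    // Only the second element yields (tok_table_or_col tok_default_value).
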
diff --git a/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseDefault.java b/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseDefault.java
new file mode 100644
index 0000000000..707b46b7f8
--- /dev/null
+++ b/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseDefault.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.apache.commons.lang3.StringUtils;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestParseDefault {
+  ParseDriver parseDriver = new ParseDriver();
+
+  @Test
+  public void testParseDefaultKeywordInInsert() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "INSERT INTO TABLE t1 values(DEFAULT, deFaUlt)", null).getTree();
+
+    assertTrue(tree.dump(), tree.toStringTree().contains(
+            "(tok_table_or_col tok_default_value) (tok_table_or_col tok_default_value)"));
+  }
+
+  @Test
+  public void testParseDefaultKeywordInUpdate() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "update t1 set b = default", null).getTree();
+
+    assertTrue(tree.dump(), tree.toStringTree().contains(
+            "(tok_table_or_col tok_default_value)"));
+
+  }
+
+  @Test
+  public void testParseDefaultKeywordInUpdateWithWhere() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "update t1 set b = default where a = 10", null).getTree();
+
+    assertTrue(tree.dump(), tree.toStringTree().contains(
+            "(tok_table_or_col tok_default_value)"));
+
+  }
+
+  @Test
+  public void testParseStructFieldNamedDefaultInSetClause() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "update t1 set b = default.field0\n", null).getTree();
+
+    assertFalse(tree.dump(), tree.toStringTree().contains("tok_default_value"));
+  }
+
+  @Test
+  public void testParseStructFieldNamedDefaultInBeginningOfSetClause() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "update t1 set b = default.field0, a = 10\n", null).getTree();
+
+    assertFalse(tree.dump(), tree.toStringTree().contains("tok_default_value"));
+  }
+
+  @Test
+  public void testParseDefaultKeywordInMerge() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "MERGE INTO t1 AS t USING t2 as s ON t.a = s.a\n" +
+                    "WHEN MATCHED THEN UPDATE SET b = defauLt " +
+                    "WHEN NOT MATCHED THEN INSERT VALUES (s.a, DEFAuLT, DEFAULT)", null).getTree();
+
+    assertEquals(tree.dump(), 3, StringUtils.countMatches(tree.toStringTree(), "(tok_table_or_col tok_default_value)"));
+  }
+
+  @Test
+  public void testParseStructNamedDefault() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "select default.src.`end`.key from s_n1\n", null).getTree();
+
+    assertFalse(tree.dump(), tree.toStringTree().contains("tok_default_value"));
+  }
+
+  @Test
+  public void testParseStructFieldNamedDefault() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "select col0.default.key from s_n1\n", null).getTree();
+
+    assertFalse(tree.dump(), tree.toStringTree().contains("tok_default_value"));
+  }
+
+  @Test
+  public void testSelectColumnNamedDefault() throws Exception {
+    ASTNode tree = parseDriver.parse(
+            "select default from s_n1\n", null).getTree();
+
+    assertFalse(tree.dump(), tree.toStringTree().contains("tok_default_value"));
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java
index a8221b48b5..03233ca597 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java
@@ -27,7 +27,6 @@ import java.util.Set;
 
 import org.antlr.runtime.TokenRewriteStream;
 import org.apache.commons.collections.MapUtils;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -432,6 +431,16 @@ public class MergeSemanticAnalyzer extends RewriteSemanticAnalyzer {
       }
       String name = fs.getName();
       if (setColsExprs.containsKey(name)) {
+        ASTNode setColExpr = setColsExprs.get(name);
+        if (setColExpr.getType() == HiveParser.TOK_TABLE_OR_COL &&
+                setColExpr.getChildCount() == 1 && setColExpr.getChild(0).getType() == HiveParser.TOK_DEFAULT_VALUE) {
+          UnparseTranslator defaultValueTranslator = new UnparseTranslator(conf);
+          defaultValueTranslator.enable();
+          defaultValueTranslator.addDefaultValueTranslation(
+                  setColsExprs.get(name), colNameToDefaultConstraint.get(name));
+          defaultValueTranslator.applyTranslations(ctx.getTokenRewriteStream());
+        }
+
         String rhsExp = getMatchedText(setColsExprs.get(name));
         //"set a=5, b=8" - rhsExp picks up the next char (e.g. ',') from the token stream
         switch (rhsExp.charAt(rhsExp.length() - 1)) {
@@ -443,10 +452,6 @@ public class MergeSemanticAnalyzer extends RewriteSemanticAnalyzer {
           //do nothing
         }
 
-        if ("`default`".equalsIgnoreCase(rhsExp.trim())) {
-          rhsExp = MapUtils.getString(colNameToDefaultConstraint, name, "null");
-        }
-
         rewrittenQueryStr.append(rhsExp);
       } else {
         rewrittenQueryStr.append(getSimpleTableName(target))
@@ -627,9 +632,13 @@ public class MergeSemanticAnalyzer extends RewriteSemanticAnalyzer {
         conf, onClauseAsString);
     oca.analyze();
 
+    UnparseTranslator defaultValuesTranslator = new UnparseTranslator(conf);
+    defaultValuesTranslator.enable();
+    List<String> targetSchema = processTableColumnNames(columnListNode, targetTable.getFullyQualifiedName());
+    collectDefaultValues(valuesNode, targetTable, targetSchema, defaultValuesTranslator);
+    defaultValuesTranslator.applyTranslations(ctx.getTokenRewriteStream());
     String valuesClause = getMatchedText(valuesNode);
     valuesClause = valuesClause.substring(1, valuesClause.length() - 1); //strip '(' and ')'
-    valuesClause = replaceDefaultKeywordForMerge(valuesClause, targetTable, columnListNode);
     rewrittenQueryStr.append(valuesClause).append("\n   WHERE ").append(oca.getPredicate());
 
     String extraPredicate = getWhenClausePredicate(whenNotMatchedClause);
@@ -641,29 +650,13 @@ public class MergeSemanticAnalyzer extends RewriteSemanticAnalyzer {
     rewrittenQueryStr.append('\n');
   }
 
-  private String replaceDefaultKeywordForMerge(String valueClause, Table table, ASTNode columnListNode)
-      throws SemanticException {
-    if (!valueClause.toLowerCase().contains("`default`")) {
-      return valueClause;
-    }
-
-    Map<String, String> colNameToDefaultConstraint = getColNameToDefaultValueMap(table);
-    String[] values = valueClause.trim().split(",");
-    String[] replacedValues = new String[values.length];
-
-    // the list of the column names may be set in the query
-    String[] columnNames = columnListNode == null ?
-      table.getAllCols().stream().map(f -> f.getName()).toArray(size -> new String[size]) :
-      columnListNode.getChildren().stream().map(n -> ((ASTNode)n).toString()).toArray(size -> new String[size]);
-
-    for (int i = 0; i < values.length; i++) {
-      if (values[i].trim().toLowerCase().equals("`default`")) {
-        replacedValues[i] = MapUtils.getString(colNameToDefaultConstraint, columnNames[i], "null");
-      } else {
-        replacedValues[i] = values[i];
-      }
+  private void collectDefaultValues(
+          ASTNode valueClause, Table targetTable, List<String> targetSchema, UnparseTranslator unparseTranslator)
+          throws SemanticException {
+    List<String> defaultConstraints = getDefaultConstraints(targetTable, targetSchema);
+    for (int j = 0; j < defaultConstraints.size(); j++) {
+      unparseTranslator.addDefaultValueTranslation((ASTNode) valueClause.getChild(j + 1), defaultConstraints.get(j));
     }
-    return StringUtils.join(replacedValues, ',');
   }
 
   /**
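This is the substantive part of the MERGE fix. The deleted
replaceDefaultKeywordForMerge scanned the rewritten VALUES text for the literal
string `default` and spliced constraints in by splitting on commas, which is
exactly what misfired for a real column named default (and was fragile for any
expression containing a comma). The replacement resolves the TOK_DEFAULT_VALUE
marker at the AST level and patches the original token stream through
UnparseTranslator. A condensed sketch of the pattern the analyzer now follows
(conf and ctx as available inside the analyzer; the constraint text "0" is
illustrative):

    UnparseTranslator translator = new UnparseTranslator(conf);
    translator.enable();
    // Registers a rewrite only when the node really is
    // (TOK_TABLE_OR_COL TOK_DEFAULT_VALUE); anything else is left alone.
    translator.addDefaultValueTranslation(defaultNode, "0");
    translator.applyTranslations(ctx.getTokenRewriteStream());
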
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java
index bbc90f849d..6e91582998 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java
@@ -307,6 +307,10 @@ public abstract class RewriteSemanticAnalyzer extends CalcitePlanner {
     // not, recurse on any children
     if (node.getToken().getType() == HiveParser.TOK_TABLE_OR_COL) {
       ASTNode colName = (ASTNode)node.getChildren().get(0);
+      if (colName.getToken().getType() == HiveParser.TOK_DEFAULT_VALUE) {
+        return;
+      }
+
       assert colName.getToken().getType() == HiveParser.Identifier :
           "Expected column name";
       setRCols.add(normalizeColName(colName.getText()));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 9003984567..6114ecf574 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -791,10 +791,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * @return List of default constraints (including NULL if there is no default)
    * @throws SemanticException
    */
-  private List<String> getDefaultConstraints(Table tbl, List<String> targetSchema) throws SemanticException{
+  protected List<String> getDefaultConstraints(Table tbl, List<String> targetSchema) throws SemanticException{
     Map<String, String> colNameToDefaultVal = getColNameToDefaultValueMap(tbl);
     List<String> defaultConstraints = new ArrayList<>();
-    if(targetSchema != null) {
+    if(targetSchema != null && !targetSchema.isEmpty()) {
       for (String colName : targetSchema) {
         defaultConstraints.add(colNameToDefaultVal.get(colName));
       }
@@ -857,7 +857,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if (selectExpr.getChildCount() == 1 && selectExpr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL) {
         //first child should be rowid
         if (i != 0 || selectExpr.getChild(0).getChild(0).getText().equals("ROW__ID")) {
-          if (selectExpr.getChild(0).getChild(0).getText().toLowerCase().equals("default")) {
+          if (selectExpr.getChild(0).getChild(0).getType() == HiveParser.TOK_DEFAULT_VALUE) {
             if (defaultConstraints == null) {
               defaultConstraints = getDefaultConstraints(targetTable, null);
             }
@@ -879,24 +879,23 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * @param valueArrClause This is AST for value clause
    * @param targetTable
    * @param targetSchema this is target schema/column schema if specified in query
-   * @throws SemanticException
    */
-  private void replaceDefaultKeyword(ASTNode valueArrClause, Table targetTable, List<String> targetSchema) throws SemanticException{
+  private void replaceDefaultKeyword(ASTNode valueArrClause, Table targetTable, List<String> targetSchema) throws SemanticException {
     List<String> defaultConstraints = null;
-    for(int i=1; i<valueArrClause.getChildCount(); i++) {
-      ASTNode valueClause = (ASTNode)valueArrClause.getChild(i);
+    for (int i = 1; i < valueArrClause.getChildCount(); i++) {
+      ASTNode valueClause = (ASTNode) valueArrClause.getChild(i);
       //skip first child since it is struct
-      for(int j=1; j<valueClause.getChildCount(); j++) {
-        if(valueClause.getChild(j).getType() == HiveParser.TOK_TABLE_OR_COL
-            && valueClause.getChild(j).getChild(0).getText().toLowerCase().equals("default")) {
-          if(defaultConstraints == null) {
+      for (int j = 1; j < valueClause.getChildCount(); j++) {
+        if (valueClause.getChild(j).getType() == HiveParser.TOK_TABLE_OR_COL
+                && valueClause.getChild(j).getChild(0).getType() == HiveParser.TOK_DEFAULT_VALUE) {
+          if (defaultConstraints == null) {
             defaultConstraints = getDefaultConstraints(targetTable, targetSchema);
           }
-          ASTNode newNode = getNodeReplacementforDefault(defaultConstraints.get(j-1));
+          ASTNode newNode = getNodeReplacementforDefault(defaultConstraints.get(j - 1));
           // replace the node in place
           valueClause.replaceChildren(j, j, newNode);
           LOG.debug("DEFAULT keyword replacement - Inserted {} for table: {}", newNode.getText(),
-              targetTable.getTableName());
+                  targetTable.getTableName());
         }
       }
     }
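The reworked loop bounds are easier to follow with the shape of the VALUES AST
in mind: child 0 of each value row is the struct wrapper produced by the
grammar, so the loops start at j = 1 and value j pairs with
defaultConstraints.get(j - 1). Roughly (a sketch, not exact dump output):

    // TOK_FUNCTION                 <- one VALUES row
    //   Identifier["struct"]       <- child 0, skipped
    //   <value for column 1>       <- child 1 -> defaultConstraints.get(0)
    //   <value for column 2>       <- child 2 -> defaultConstraints.get(1)

The switch from comparing child text against "default" to checking for
TOK_DEFAULT_VALUE is the same disambiguation as elsewhere: only the keyword,
never a column actually named default, triggers constraint substitution.
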
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
index 8c5cd852b3..eadafff4ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
@@ -189,6 +189,19 @@ public class UnparseTranslator {
     addTranslation(identifier, replacementText);
   }
 
+  public void addDefaultValueTranslation(ASTNode exprNode, String defaultValue) {
+    if (!(exprNode.getType() == HiveParser.TOK_TABLE_OR_COL
+            && exprNode.getChild(0).getType() == HiveParser.TOK_DEFAULT_VALUE)) {
+      return;
+    }
+
+    if (defaultValue == null) {
+      defaultValue = "NULL";
+    }
+    addTranslation(exprNode, defaultValue);
+  }
+
+
   /**
    * Register a "copy" translation in which a node will be translated into
    * whatever the translation turns out to be for another node (after
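addDefaultValueTranslation is deliberately a no-op for any node that is not the
TOK_DEFAULT_VALUE marker, so callers can apply it blanket-fashion across a SET
or VALUES clause without pre-filtering, and a column without a DEFAULT
constraint degrades to the literal NULL (matching what the deleted
string-replacement code produced). A small usage sketch mirroring
collectDefaultValues above (valuesRow, translator, and constraints are
illustrative names):

    // Child 0 is the struct wrapper; non-marker children are simply ignored.
    for (int j = 1; j < valuesRow.getChildCount(); j++) {
      translator.addDefaultValueTranslation(
          (ASTNode) valuesRow.getChild(j), constraints.get(j - 1));
    }
    // A null constraint rewrites the keyword to "NULL".
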
diff --git a/ql/src/test/queries/clientpositive/insert_into_default_keyword_2.q b/ql/src/test/queries/clientpositive/insert_into_default_keyword_2.q
new file mode 100644
index 0000000000..c81b128b71
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_into_default_keyword_2.q
@@ -0,0 +1,15 @@
+-- The table has a column named 'default'; reference it in an update's SET clause
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+-- SORT_QUERY_RESULTS
+
+-- with default constraint
+CREATE TABLE t1 (a int, `default` int) stored as orc TBLPROPERTIES ('transactional'='true');
+
+insert into t1 values (1, 2), (10, 11);
+
+explain
+update t1 set a = `default`;
+update t1 set a = `default`;
+
+select * from t1;
\ No newline at end of file
diff --git a/ql/src/test/results/clientpositive/llap/insert_into_default_keyword_2.q.out b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword_2.q.out
new file mode 100644
index 0000000000..e74b210254
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword_2.q.out
@@ -0,0 +1,113 @@
+PREHOOK: query: CREATE TABLE t1 (a int, `default` int) stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: CREATE TABLE t1 (a int, `default` int) stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: insert into t1 values (1, 2), (10, 11)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@t1
+POSTHOOK: query: insert into t1 values (1, 2), (10, 11)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.a SCRIPT []
+POSTHOOK: Lineage: t1.default SCRIPT []
+PREHOOK: query: explain
+update t1 set a = `default`
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: explain
+update t1 set a = `default`
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), default (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 2 Data size: 160 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
+                      null sort order: z
+                      sort order: +
+                      Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                      Statistics: Num rows: 2 Data size: 160 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: int)
+            Execution mode: vectorized, llap
+            LLAP IO: may be used (ACID table)
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), VALUE._col0 (type: int)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 2 Data size: 160 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.t1
+                  Write Type: UPDATE
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.t1
+          Write Type: UPDATE
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+
+PREHOOK: query: update t1 set a = `default`
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: update t1 set a = `default`
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+11	11
+2	2