Posted to commits@hive.apache.org by kg...@apache.org on 2019/03/13 18:00:11 UTC

[hive] branch master updated: HIVE-21401: Break up DDLTask - extract Table related operations (Miklos Gergely via Zoltan Haindrich)

This is an automated email from the ASF dual-hosted git repository.

kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 95c7d36  HIVE-21401: Break up DDLTask - extract Table related operations (Miklos Gergely via Zoltan Haindrich)
95c7d36 is described below

commit 95c7d361549272c52dfcf637eb2a0360b79b9815
Author: Miklos Gergely <mg...@hortonworks.com>
AuthorDate: Wed Mar 13 18:51:42 2019 +0100

    HIVE-21401: Break up DDLTask - extract Table related operations (Miklos Gergely via Zoltan Haindrich)
    
    Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>
---
 .../test/results/positive/accumulo_queries.q.out   |    6 +-
 .../accumulo_single_sourced_multi_insert.q.out     |    3 +-
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |    4 -
 .../test/results/clientnegative/serde_regex.q.out  |   21 +-
 .../results/clientpositive/fileformat_base64.q.out |   11 +-
 .../test/results/clientpositive/serde_regex.q.out  |   19 +-
 .../src/test/results/positive/hbase_ddl.q.out      |    3 +-
 .../src/test/results/positive/hbase_queries.q.out  |    6 +-
 .../hbase_single_sourced_multi_insert.q.out        |    3 +-
 .../src/test/results/positive/hbasestats.q.out     |    3 +-
 .../cli/SemanticAnalysis/CreateTableHook.java      |   67 +-
 .../cli/SemanticAnalysis/HCatSemanticAnalyzer.java |   83 +-
 .../java/org/apache/hive/jdbc/TestJdbcDriver2.java |    2 +-
 .../ql/metadata/DummySemanticAnalyzerHook.java     |    6 +-
 .../ql/metadata/DummySemanticAnalyzerHook1.java    |    7 +-
 .../apache/hadoop/hive/ql/ddl/DDLOperation.java    |   16 +-
 .../hadoop/hive/ql/ddl/DDLOperationContext.java    |   29 +-
 .../org/apache/hadoop/hive/ql/ddl/DDLTask2.java    |    3 +-
 .../org/apache/hadoop/hive/ql/ddl/DDLUtils.java    |  200 +++
 .../org/apache/hadoop/hive/ql/ddl/DDLWork2.java    |    3 +
 .../ql/ddl/database/DescDatabaseOperation.java     |   19 +-
 .../database}/ShowCreateDatabaseDesc.java          |   62 +-
 .../ddl/database/ShowCreateDatabaseOperation.java  |   76 ++
 .../ql/ddl/database/ShowDatabasesOperation.java    |    3 +-
 .../ql/{plan => ddl/table}/CreateTableDesc.java    |   44 +-
 .../{plan => ddl/table}/CreateTableLikeDesc.java   |  132 +-
 .../ql/ddl/table/CreateTableLikeOperation.java     |  211 +++
 .../hive/ql/ddl/table/CreateTableOperation.java    |  168 +++
 .../hive/ql/{plan => ddl/table}/DescTableDesc.java |  159 +--
 .../hive/ql/ddl/table/DescTableOperation.java      |  280 ++++
 .../hadoop/hive/ql/ddl/table/DropTableDesc.java    |   95 ++
 .../hive/ql/ddl/table/DropTableOperation.java      |  148 +++
 .../hive/ql/{plan => ddl/table}/LockTableDesc.java |   49 +-
 .../LockTableOperation.java}                       |   39 +-
 .../{parse => ddl/table}/PreInsertTableDesc.java   |   21 +-
 .../hive/ql/ddl/table/PreInsertTableOperation.java |   55 +
 .../{plan => ddl/table}/ShowCreateTableDesc.java   |   66 +-
 .../ql/ddl/table/ShowCreateTableOperation.java     |  278 ++++
 .../table/ShowTablePropertiesDesc.java}            |   84 +-
 .../ql/ddl/table/ShowTablePropertiesOperation.java |   89 ++
 .../{plan => ddl/table}/ShowTableStatusDesc.java   |  113 +-
 .../ql/ddl/table/ShowTableStatusOperation.java     |   89 ++
 .../ql/{plan => ddl/table}/ShowTablesDesc.java     |  154 +--
 .../hive/ql/ddl/table/ShowTablesOperation.java     |  115 ++
 .../ql/{plan => ddl/table}/TruncateTableDesc.java  |   70 +-
 .../hive/ql/ddl/table/TruncateTableOperation.java  |   89 ++
 .../ql/{plan => ddl/table}/UnlockTableDesc.java    |   29 +-
 .../UnlockTableOperation.java}                     |   39 +-
 .../table/package-info.java}                       |   28 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java    | 1345 +-------------------
 .../java/org/apache/hadoop/hive/ql/exec/Task.java  |    2 +-
 .../repl/bootstrap/load/table/LoadPartitions.java  |    6 +-
 .../exec/repl/bootstrap/load/table/LoadTable.java  |    6 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java    |    2 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java       |    4 +-
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java     |    4 +-
 .../hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java |    4 +-
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   |   16 +-
 .../metadata/formatting/JsonMetaDataFormatter.java |    5 +-
 .../metadata/formatting/MetaDataFormatUtils.java   |    4 +-
 .../ql/metadata/formatting/MetaDataFormatter.java  |    9 +-
 .../metadata/formatting/TextMetaDataFormatter.java |   14 +-
 .../hive/ql/optimizer/QueryPlanPostProcessor.java  |   34 +-
 .../hive/ql/parse/AcidExportSemanticAnalyzer.java  |   11 +-
 .../hadoop/hive/ql/parse/DDLSemanticAnalyzer.java  |  108 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java      |    5 +-
 .../apache/hadoop/hive/ql/parse/ParseContext.java  |    2 +-
 .../java/org/apache/hadoop/hive/ql/parse/QB.java   |    2 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java     |   24 +-
 .../apache/hadoop/hive/ql/parse/TaskCompiler.java  |    6 +-
 .../repl/load/message/DropPartitionHandler.java    |    6 +-
 .../parse/repl/load/message/DropTableHandler.java  |    8 +-
 .../load/message/TruncatePartitionHandler.java     |    8 +-
 .../repl/load/message/TruncateTableHandler.java    |    8 +-
 .../org/apache/hadoop/hive/ql/plan/DDLWork.java    |  224 +---
 .../hadoop/hive/ql/plan/DropPartitionDesc.java     |   99 ++
 .../apache/hadoop/hive/ql/plan/DropTableDesc.java  |  194 ---
 .../hadoop/hive/ql/plan/ImportTableDesc.java       |    6 +-
 .../apache/hadoop/hive/ql/plan/LoadFileDesc.java   |    1 +
 .../org/apache/hadoop/hive/ql/plan/PlanUtils.java  |    1 +
 .../hadoop/hive/ql/txn/compactor/CompactorMR.java  |    5 +-
 .../hadoop/hive/ql/parse/TestHiveDecimalParse.java |   12 +-
 .../test/queries/clientpositive/db_ddl_explain.q   |   20 +
 .../clientnegative/authorization_explain.q.out     |   15 +-
 .../test/results/clientnegative/avro_decimal.q.out |    2 +-
 .../clientnegative/constraint_duplicate_name.q.out |    2 +-
 .../clientnegative/create_external_acid.q.out      |    2 +-
 .../results/clientnegative/create_not_acid.q.out   |    2 +-
 .../clientnegative/create_table_wrong_regex.q.out  |    2 +-
 .../clientnegative/create_view_failure2.q.out      |    2 +-
 .../create_with_constraints_duplicate_name.q.out   |    2 +-
 .../clientnegative/create_with_fk_constraint.q.out |    2 +-
 .../create_with_fk_pk_same_tab.q.out               |    2 +-
 .../create_with_fk_uk_same_tab.q.out               |    2 +-
 .../clientnegative/create_with_fk_wrong_ref.q.out  |    2 +-
 .../clientnegative/create_with_fk_wrong_ref2.q.out |    2 +-
 .../clientnegative/dbtxnmgr_notablelock.q.out      |    2 +-
 .../clientnegative/dbtxnmgr_notableunlock.q.out    |    2 +-
 ql/src/test/results/clientnegative/deletejar.q.out |    2 +-
 .../results/clientnegative/describe_xpath1.q.out   |    2 +-
 .../results/clientnegative/describe_xpath2.q.out   |    2 +-
 .../results/clientnegative/describe_xpath3.q.out   |    2 +-
 .../results/clientnegative/describe_xpath4.q.out   |    2 +-
 .../clientnegative/drop_table_failure2.q.out       |    2 +-
 .../clientnegative/drop_table_used_by_mv.q.out     |    2 +-
 .../clientnegative/drop_view_failure1.q.out        |    2 +-
 .../results/clientnegative/druid_address.q.out     |    2 +-
 .../results/clientnegative/druid_buckets.q.out     |    2 +-
 .../test/results/clientnegative/druid_case.q.out   |    2 +-
 .../results/clientnegative/druid_datasource.q.out  |    2 +-
 .../results/clientnegative/druid_datasource2.q.out |    2 +-
 .../results/clientnegative/druid_location.q.out    |    2 +-
 .../results/clientnegative/druid_partitions.q.out  |    2 +-
 ql/src/test/results/clientnegative/external1.q.out |    2 +-
 .../results/clientnegative/insert_sorted.q.out     |    2 +-
 ql/src/test/results/clientnegative/lockneg1.q.out  |    2 +-
 ql/src/test/results/clientnegative/lockneg2.q.out  |    2 +-
 ql/src/test/results/clientnegative/lockneg3.q.out  |    2 +-
 .../clientnegative/materialized_view_drop.q.out    |    2 +-
 .../clientnegative/materialized_view_drop2.q.out   |    2 +-
 .../clientnegative/nested_complex_neg.q.out        |    2 +-
 .../test/results/clientnegative/serde_regex.q.out  |    2 +-
 .../test/results/clientnegative/serde_regex3.q.out |    2 +-
 .../special_character_in_tabnames_1.q.out          |    2 +-
 .../clientnegative/strict_managed_tables1.q.out    |    2 +-
 .../clientnegative/strict_managed_tables4.q.out    |    2 +-
 .../clientnegative/strict_managed_tables5.q.out    |    2 +-
 .../results/clientpositive/ambiguitycheck.q.out    |    5 +-
 .../clientpositive/annotate_stats_table.q.out      |   13 +-
 .../clientpositive/create_union_table.q.out        |   13 +-
 ql/src/test/results/clientpositive/ctas.q.out      |   71 +-
 .../test/results/clientpositive/ctas_colname.q.out |   91 +-
 .../ctas_uses_database_location.q.out              |   13 +-
 .../results/clientpositive/db_ddl_explain.q.out    |  171 +++
 .../clientpositive/drop_deleted_partitions.q.out   |    4 +-
 .../clientpositive/drop_multi_partitions.q.out     |    4 +-
 .../druid/druidmini_dynamic_partition.q.out        |   27 +-
 .../clientpositive/druid/druidmini_mv.q.out        |    3 +-
 .../test/results/clientpositive/explain_ddl.q.out  |   88 +-
 .../clientpositive/fileformat_sequencefile.q.out   |   11 +-
 .../results/clientpositive/fileformat_text.q.out   |   11 +-
 .../clientpositive/groupby_duplicate_key.q.out     |   13 +-
 ql/src/test/results/clientpositive/input1.q.out    |    5 +-
 ql/src/test/results/clientpositive/input10.q.out   |    5 +-
 ql/src/test/results/clientpositive/input15.q.out   |   15 +-
 ql/src/test/results/clientpositive/input2.q.out    |    7 +-
 ql/src/test/results/clientpositive/inputddl1.q.out |   13 +-
 ql/src/test/results/clientpositive/inputddl2.q.out |   15 +-
 ql/src/test/results/clientpositive/inputddl3.q.out |   15 +-
 ql/src/test/results/clientpositive/inputddl6.q.out |    9 +-
 ql/src/test/results/clientpositive/llap/ctas.q.out |   71 +-
 .../llap/dynamic_partition_pruning.q.out           |   13 +-
 .../clientpositive/llap/explainuser_1.q.out        |   24 +-
 .../clientpositive/llap/partition_ctas.q.out       |   15 +-
 .../clientpositive/llap/rcfile_createas1.q.out     |   13 +-
 .../clientpositive/llap/semijoin_reddedup.q.out    |   19 +-
 .../results/clientpositive/llap/temp_table.q.out   |   34 +-
 .../test/results/clientpositive/llap/tez_dml.q.out |   13 +-
 .../llap/tez_join_result_complex.q.out             |   26 +-
 .../clientpositive/llap/unionDistinct_1.q.out      |   13 +-
 .../clientpositive/llap/union_top_level.q.out      |   13 +-
 .../llap/vector_char_varchar_1.q.out               |   26 +-
 .../clientpositive/llap/vector_decimal_6.q.out     |   13 +-
 .../llap/vector_windowing_streaming.q.out          |   15 +-
 .../vectorized_dynamic_partition_pruning.q.out     |   13 +-
 ql/src/test/results/clientpositive/merge3.q.out    |   13 +-
 .../clientpositive/nonReservedKeyWords.q.out       |   39 +-
 .../test/results/clientpositive/nonmr_fetch.q.out  |   13 +-
 .../test/results/clientpositive/nullformat.q.out   |   13 +-
 .../results/clientpositive/nullformatCTAS.q.out    |   13 +-
 .../results/clientpositive/orc_createas1.q.out     |   26 +-
 .../results/clientpositive/parallel_orderby.q.out  |   13 +-
 .../results/clientpositive/serde_opencsv.q.out     |   21 +-
 .../test/results/clientpositive/serde_regex.q.out  |   34 +-
 .../test/results/clientpositive/show_tables.q.out  |   14 +-
 .../results/clientpositive/show_tablestatus.q.out  |    3 +
 .../results/clientpositive/skewjoin_noskew.q.out   |   13 +-
 .../clientpositive/skewjoin_onesideskew.q.out      |   13 +-
 .../test/results/clientpositive/smb_mapjoin9.q.out |   13 +-
 .../test/results/clientpositive/spark/ctas.q.out   |   71 +-
 .../clientpositive/spark/parallel_orderby.q.out    |   13 +-
 .../clientpositive/spark/skewjoin_noskew.q.out     |   13 +-
 .../spark/spark_dynamic_partition_pruning.q.out    |   13 +-
 .../clientpositive/spark/spark_explainuser_1.q.out |   24 +-
 ...park_vectorized_dynamic_partition_pruning.q.out |   13 +-
 .../results/clientpositive/spark/temp_table.q.out  |   34 +-
 .../results/clientpositive/spark/union25.q.out     |   13 +-
 .../clientpositive/spark/union_top_level.q.out     |   13 +-
 .../clientpositive/symlink_text_input_format.q.out |   11 +-
 .../clientpositive/temp_table_truncate.q.out       |   10 +-
 .../clientpositive/tez/explainanalyze_1.q.out      |    3 +-
 .../clientpositive/tez/explainanalyze_3.q.out      |   16 +-
 .../results/clientpositive/tez/explainuser_3.q.out |   16 +-
 .../results/clientpositive/truncate_table.q.out    |   32 +-
 ql/src/test/results/clientpositive/union25.q.out   |   13 +-
 .../results/clientpositive/vector_decimal_6.q.out  |   13 +-
 .../clientpositive/vector_tablesample_rows.q.out   |   15 +-
 197 files changed, 3481 insertions(+), 3644 deletions(-)
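
For context on the pattern this commit introduces: each DDL command is now described by a DDLDesc that registers a matching DDLOperation with DDLTask2, which instantiates the operation reflectively with a DDLOperationContext and the desc, then calls execute(). The sketch below illustrates that wiring for a made-up SHOW FOO command; ShowFooDesc and ShowFooOperation are hypothetical names used only for illustration and are not classes added by this patch.

  // Hypothetical sketch only -- ShowFooDesc/ShowFooOperation are illustrative names,
  // not part of this commit.
  package org.apache.hadoop.hive.ql.ddl.example;

  import java.io.Serializable;

  import org.apache.hadoop.hive.ql.ddl.DDLDesc;
  import org.apache.hadoop.hive.ql.ddl.DDLOperation;
  import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
  import org.apache.hadoop.hive.ql.ddl.DDLTask2;

  /** Describes a made-up SHOW FOO command; carries only data, no execution logic. */
  public class ShowFooDesc implements DDLDesc, Serializable {
    private static final long serialVersionUID = 1L;

    static {
      // The desc registers the operation class that executes it; DDLTask2 keeps the mapping.
      DDLTask2.registerOperation(ShowFooDesc.class, ShowFooOperation.class);
    }

    private final String resFile;

    public ShowFooDesc(String resFile) {
      this.resFile = resFile;
    }

    public String getResFile() {
      return resFile;
    }
  }

  /** Executes the made-up SHOW FOO command. */
  class ShowFooOperation extends DDLOperation {
    private final ShowFooDesc desc;

    // DDLTask2 instantiates operations reflectively with (DDLOperationContext, <desc class>).
    public ShowFooOperation(DDLOperationContext context, ShowFooDesc desc) {
      super(context);
      this.desc = desc;
    }

    @Override
    public int execute() throws Exception {
      // A real operation would write its result to desc.getResFile(), for example via
      // DDLUtils.getOutputStream(new Path(desc.getResFile()), context).
      return 0;
    }
  }

Registration in the desc's static initializer mirrors what ShowCreateDatabaseDesc does in this patch, so adding a new DDL command no longer requires touching the monolithic DDLTask switch logic. The actual classes moved or added by the commit follow in the diff below.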

diff --git a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
index ac2d527..d021482 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
@@ -59,8 +59,7 @@ STAGE PLANS:
             COLUMN_STATS_ACCURATE 
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-2
     Map Reduce
@@ -543,8 +542,7 @@ STAGE PLANS:
             COLUMN_STATS_ACCURATE 
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-3
     Map Reduce
diff --git a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
index ac809fa..3fa3f16 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
@@ -70,8 +70,7 @@ STAGE PLANS:
           Table: default.src_x1
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-3
     Map Reduce
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index c33d03e..d48f2c8 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1518,10 +1518,6 @@ public class HiveConf extends Configuration {
     CLIPROMPT("hive.cli.prompt", "hive",
         "Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
         "Variable substitution will only be invoked at the Hive CLI startup."),
-    CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1,
-        "The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.\n" +
-        "If the value of this property is -1, then Hive will use the auto-detected terminal width."),
-
     /**
      * @deprecated Use MetastoreConf.FS_HANDLER_CLS
      */
diff --git a/contrib/src/test/results/clientnegative/serde_regex.q.out b/contrib/src/test/results/clientnegative/serde_regex.q.out
index 58a4679..65b8e31 100644
--- a/contrib/src/test/results/clientnegative/serde_regex.q.out
+++ b/contrib/src/test/results/clientnegative/serde_regex.q.out
@@ -49,16 +49,15 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: host string, identity string, user string, time string, request string, status int, size int, referer string, agent string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.contrib.serde2.RegexSerDe
-          serde properties:
-            input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))?
-            output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s
-          name: default.serde_regex
+    Create Table
+      columns: host string, identity string, user string, time string, request string, status int, size int, referer string, agent string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.contrib.serde2.RegexSerDe
+      serde properties:
+        input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))?
+        output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s
+      name: default.serde_regex
 
 PREHOOK: query: CREATE TABLE serde_regex(
   host STRING,
@@ -79,4 +78,4 @@ STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@serde_regex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int)
diff --git a/contrib/src/test/results/clientpositive/fileformat_base64.q.out b/contrib/src/test/results/clientpositive/fileformat_base64.q.out
index 8e6a5e4..c204fee 100644
--- a/contrib/src/test/results/clientpositive/fileformat_base64.q.out
+++ b/contrib/src/test/results/clientpositive/fileformat_base64.q.out
@@ -21,12 +21,11 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: key int, value string
-          input format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat
-          output format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextOutputFormat
-          name: default.base64_test
+    Create Table
+      columns: key int, value string
+      input format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat
+      output format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextOutputFormat
+      name: default.base64_test
 
 PREHOOK: query: CREATE TABLE base64_test(key INT, value STRING) STORED AS
   INPUTFORMAT 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat'
diff --git a/contrib/src/test/results/clientpositive/serde_regex.q.out b/contrib/src/test/results/clientpositive/serde_regex.q.out
index 691e254..80bd2e4 100644
--- a/contrib/src/test/results/clientpositive/serde_regex.q.out
+++ b/contrib/src/test/results/clientpositive/serde_regex.q.out
@@ -43,16 +43,15 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: host string, identity string, user string, time string, request string, status string, size string, referer string, agent string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.contrib.serde2.RegexSerDe
-          serde properties:
-            input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))?
-            output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s
-          name: default.serde_regex
+    Create Table
+      columns: host string, identity string, user string, time string, request string, status string, size string, referer string, agent string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.contrib.serde2.RegexSerDe
+      serde properties:
+        input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))?
+        output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s
+      name: default.serde_regex
 
 PREHOOK: query: CREATE TABLE serde_regex(
   host STRING,
diff --git a/hbase-handler/src/test/results/positive/hbase_ddl.q.out b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
index e87240a..7adb474 100644
--- a/hbase-handler/src/test/results/positive/hbase_ddl.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
@@ -57,8 +57,7 @@ STAGE PLANS:
             COLUMN_STATS_ACCURATE 
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-2
     Map Reduce
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index 02f46d8..793e01f 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -57,8 +57,7 @@ STAGE PLANS:
             COLUMN_STATS_ACCURATE 
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-2
     Map Reduce
@@ -543,8 +542,7 @@ STAGE PLANS:
             COLUMN_STATS_ACCURATE 
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-3
     Map Reduce
diff --git a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
index b15515e..60d0829 100644
--- a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
@@ -70,8 +70,7 @@ STAGE PLANS:
           Table: default.src_x1
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-3
     Map Reduce
diff --git a/hbase-handler/src/test/results/positive/hbasestats.q.out b/hbase-handler/src/test/results/positive/hbasestats.q.out
index 5143522..783708f 100644
--- a/hbase-handler/src/test/results/positive/hbasestats.q.out
+++ b/hbase-handler/src/test/results/positive/hbasestats.q.out
@@ -85,8 +85,7 @@ STAGE PLANS:
             COLUMN_STATS_ACCURATE 
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-2
     Map Reduce
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index a377805..8366e5e 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -27,7 +27,9 @@ import java.util.Map;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -39,7 +41,6 @@ import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.StorageFormat;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.common.HCatUtil;
@@ -135,18 +136,19 @@ final class CreateTableHook extends HCatSemanticAnalyzerBase {
     throws SemanticException {
 
     if (rootTasks.size() == 0) {
-      // There will be no DDL task created in case if its CREATE TABLE IF
-      // NOT EXISTS
+      // There will be no DDL task created in case if its CREATE TABLE IF NOT EXISTS
       return;
     }
-    CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1))
-      .getWork().getCreateTblDesc();
-    if (desc == null) {
-      // Desc will be null if its CREATE TABLE LIKE. Desc will be
-      // contained in CreateTableLikeDesc. Currently, HCat disallows CTLT in
-      // pre-hook. So, desc can never be null.
+    Task<?> t = rootTasks.get(rootTasks.size() - 1);
+    if (!(t instanceof DDLTask2)) {
       return;
     }
+    DDLTask2 task = (DDLTask2)t;
+    DDLDesc d = task.getWork().getDDLDesc();
+    if (!(d instanceof CreateTableDesc)) {
+      return;
+    }
+    CreateTableDesc desc = (CreateTableDesc)d;
     Map<String, String> tblProps = desc.getTblProps();
     if (tblProps == null) {
       // tblProps will be null if user didnt use tblprops in his CREATE
@@ -157,8 +159,7 @@ final class CreateTableHook extends HCatSemanticAnalyzerBase {
 
     // first check if we will allow the user to create table.
     String storageHandler = desc.getStorageHandler();
-    if (StringUtils.isEmpty(storageHandler)) {
-    } else {
+    if (StringUtils.isNotEmpty(storageHandler)) {
       try {
         HiveStorageHandler storageHandlerInst = HCatUtil
           .getStorageHandler(context.getConf(),
@@ -173,33 +174,31 @@ final class CreateTableHook extends HCatSemanticAnalyzerBase {
       }
     }
 
-    if (desc != null) {
-      try {
-        Table table = context.getHive().newTable(desc.getTableName());
-        if (desc.getLocation() != null) {
-          table.setDataLocation(new Path(desc.getLocation()));
-        }
-        if (desc.getStorageHandler() != null) {
-          table.setProperty(
+    try {
+      Table table = context.getHive().newTable(desc.getTableName());
+      if (desc.getLocation() != null) {
+        table.setDataLocation(new Path(desc.getLocation()));
+      }
+      if (desc.getStorageHandler() != null) {
+        table.setProperty(
             org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
             desc.getStorageHandler());
-        }
-        for (Map.Entry<String, String> prop : tblProps.entrySet()) {
-          table.setProperty(prop.getKey(), prop.getValue());
-        }
-        for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
-          table.setSerdeParam(prop.getKey(), prop.getValue());
-        }
-        //TODO: set other Table properties as needed
+      }
+      for (Map.Entry<String, String> prop : tblProps.entrySet()) {
+        table.setProperty(prop.getKey(), prop.getValue());
+      }
+      for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
+        table.setSerdeParam(prop.getKey(), prop.getValue());
+      }
+      //TODO: set other Table properties as needed
 
-        //authorize against the table operation so that location permissions can be checked if any
+      //authorize against the table operation so that location permissions can be checked if any
 
-        if (HCatAuthUtil.isAuthorizationEnabled(context.getConf())) {
-          authorize(table, Privilege.CREATE);
-        }
-      } catch (HiveException ex) {
-        throw new SemanticException(ex);
+      if (HCatAuthUtil.isAuthorizationEnabled(context.getConf())) {
+        authorize(table, Privilege.CREATE);
       }
+    } catch (HiveException ex) {
+      throw new SemanticException(ex);
     }
 
     desc.setTblProps(tblProps);
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index fd159fe..6d9dd5e 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -25,6 +25,9 @@ import org.apache.hadoop.hive.ql.ddl.database.DescDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.ShowDatabasesDesc;
 import org.apache.hadoop.hive.ql.ddl.database.SwitchDatabaseDesc;
+import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.ShowTableStatusDesc;
+import org.apache.hadoop.hive.ql.ddl.table.ShowTablesDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -38,12 +41,9 @@ import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DescTableDesc;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
-import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
-import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
 import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.hcatalog.common.ErrorType;
@@ -297,51 +297,47 @@ public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
       SwitchDatabaseDesc switchDb = (SwitchDatabaseDesc)ddlDesc;
       Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName());
       authorize(db, Privilege.SELECT);
-    }
-  }
-
-  @Override
-  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work)
-    throws HiveException {
-    // DB opereations, none of them are enforced by Hive right now.
-
-    ShowTablesDesc showTables = work.getShowTblsDesc();
-    if (showTables != null) {
+    } else if (ddlDesc instanceof ShowTablesDesc) {
+      ShowTablesDesc showTables = (ShowTablesDesc)ddlDesc;
       String dbName = showTables.getDbName() == null ? SessionState.get().getCurrentDatabase()
-        : showTables.getDbName();
+          : showTables.getDbName();
       authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
-    }
-
-    ShowTableStatusDesc showTableStatus = work.getShowTblStatusDesc();
-    if (showTableStatus != null) {
+    } else if (ddlDesc instanceof DescTableDesc) {
+      // we should be careful when authorizing table based on just the
+      // table name. If columns have separate authorization domain, it
+      // must be honored
+      DescTableDesc descTable = (DescTableDesc)ddlDesc;
+      String tableName = extractTableName(descTable.getTableName());
+      authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
+    } else if (ddlDesc instanceof ShowTableStatusDesc) {
+      ShowTableStatusDesc showTableStatus = (ShowTableStatusDesc)ddlDesc;
       String dbName = showTableStatus.getDbName() == null ? SessionState.get().getCurrentDatabase()
-        : showTableStatus.getDbName();
+          : showTableStatus.getDbName();
       authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
     }
+  }
 
+  @Override
+  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work)
+    throws HiveException {
     // TODO: add alter database support in HCat
 
     // Table operations.
 
-    DropTableDesc dropTable = work.getDropTblDesc();
-    if (dropTable != null) {
-      if (dropTable.getPartSpecs() == null) {
-        // drop table is already enforced by Hive. We only check for table level location even if the
-        // table is partitioned.
-      } else {
-        //this is actually a ALTER TABLE DROP PARITITION statement
-        for (DropTableDesc.PartSpec partSpec : dropTable.getPartSpecs()) {
-          // partitions are not added as write entries in drop partitions in Hive
-          Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName());
-          List<Partition> partitions = null;
-          try {
-            partitions = hive.getPartitionsByFilter(table, partSpec.getPartSpec().getExprString());
-          } catch (Exception e) {
-            throw new HiveException(e);
-          }
-          for (Partition part : partitions) {
-            authorize(part, Privilege.DROP);
-          }
+    DropPartitionDesc dropPartition = work.getDropPartitionDesc();
+    if (dropPartition != null) {
+      //this is actually a ALTER TABLE DROP PARITITION statement
+      for (DropPartitionDesc.PartSpec partSpec : dropPartition.getPartSpecs()) {
+        // partitions are not added as write entries in drop partitions in Hive
+        Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropPartition.getTableName());
+        List<Partition> partitions = null;
+        try {
+          partitions = hive.getPartitionsByFilter(table, partSpec.getPartSpec().getExprString());
+        } catch (Exception e) {
+          throw new HiveException(e);
+        }
+        for (Partition part : partitions) {
+          authorize(part, Privilege.DROP);
         }
       }
     }
@@ -377,15 +373,6 @@ public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
       //other alter operations are already supported by Hive
     }
 
-    // we should be careful when authorizing table based on just the
-    // table name. If columns have separate authorization domain, it
-    // must be honored
-    DescTableDesc descTable = work.getDescTblDesc();
-    if (descTable != null) {
-      String tableName = extractTableName(descTable.getTableName());
-      authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
-    }
-
     ShowPartitionsDesc showParts = work.getShowPartsDesc();
     if (showParts != null) {
       String tableName = extractTableName(showParts.getTabName());
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 14187cc..8f19b7d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -1092,7 +1092,7 @@ public class TestJdbcDriver2 {
     // codes and messages. This should be fixed.
     doTestErrorCase(
         "create table " + tableName + " (key int, value string)",
-        "FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask",
+        "FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2",
         "08S01", 1);
   }
 
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java
index 3575a16..f988d42 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java
@@ -23,14 +23,14 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hive.ql.exec.DDLTask;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 
 public class DummySemanticAnalyzerHook extends AbstractSemanticAnalyzerHook{
 
@@ -92,7 +92,7 @@ class DummyCreateTableHook extends AbstractSemanticAnalyzerHook{
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
       List<Task<? extends Serializable>> rootTasks) throws SemanticException {
-    CreateTableDesc desc = ((DDLTask)rootTasks.get(rootTasks.size()-1)).getWork().getCreateTblDesc();
+    CreateTableDesc desc = (CreateTableDesc) ((DDLTask2)rootTasks.get(rootTasks.size()-1)).getWork().getDDLDesc();
     Map<String,String> tblProps = desc.getTblProps();
     if(tblProps == null) {
       tblProps = new HashMap<String, String>();
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java
index e20ac64..b2b0072 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java
@@ -23,14 +23,14 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hive.ql.exec.DDLTask;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 
@@ -62,8 +62,7 @@ public class DummySemanticAnalyzerHook1 extends AbstractSemanticAnalyzerHook {
       return;
     }
 
-    CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1)).getWork()
-        .getCreateTblDesc();
+    CreateTableDesc desc = (CreateTableDesc) ((DDLTask2) rootTasks.get(rootTasks.size() - 1)).getWork().getDDLDesc();
     Map<String, String> tblProps = desc.getTblProps();
     if (tblProps == null) {
       tblProps = new HashMap<String, String>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java
index e349a0a..d556d55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java
@@ -18,11 +18,6 @@
 
 package org.apache.hadoop.hive.ql.ddl;
 
-import java.io.DataOutputStream;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -38,14 +33,5 @@ public abstract class DDLOperation {
     this.context = context;
   }
 
-  public abstract int execute() throws HiveException;
-
-  protected DataOutputStream getOutputStream(Path outputFile) throws HiveException {
-    try {
-      FileSystem fs = outputFile.getFileSystem(context.getConf());
-      return fs.create(outputFile);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
-  }
+  public abstract int execute() throws Exception;
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java
index 924f0b3..14744d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hive.ql.ddl;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils;
@@ -33,12 +35,21 @@ public class DDLOperationContext {
   private final HiveConf conf;
   private final DriverContext driverContext;
   private final MetaDataFormatter formatter;
+  private final DDLTask2 task;
+  private final DDLWork2 work;
+  private final QueryState queryState;
+  private final QueryPlan queryPlan;
 
-  public DDLOperationContext(HiveConf conf, DriverContext driverContext) throws HiveException {
+  public DDLOperationContext(HiveConf conf, DriverContext driverContext, DDLTask2 task, DDLWork2 work,
+      QueryState queryState, QueryPlan queryPlan) throws HiveException {
     this.db = Hive.get(conf);
     this.conf = conf;
     this.driverContext = driverContext;
     this.formatter = MetaDataFormatUtils.getFormatter(conf);
+    this.task = task;
+    this.work = work;
+    this.queryState = queryState;
+    this.queryPlan = queryPlan;
   }
 
   public Hive getDb() {
@@ -56,4 +67,20 @@ public class DDLOperationContext {
   public MetaDataFormatter getFormatter() {
     return formatter;
   }
+
+  public DDLTask2 getTask() {
+    return task;
+  }
+
+  public DDLWork2 getWork() {
+    return work;
+  }
+
+  public QueryState getQueryState() {
+    return queryState;
+  }
+
+  public QueryPlan getQueryPlan() {
+    return queryPlan;
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java
index 068e1e7..1f9a0bb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java
@@ -65,7 +65,8 @@ public final class DDLTask2 extends Task<DDLWork2> implements Serializable {
       DDLDesc ddlDesc = work.getDDLDesc();
 
       if (DESC_TO_OPARATION.containsKey(ddlDesc.getClass())) {
-        DDLOperationContext context = new DDLOperationContext(conf, driverContext);
+        DDLOperationContext context = new DDLOperationContext(conf, driverContext, this, (DDLWork2)work, queryState,
+            queryPlan);
         Class<? extends DDLOperation> ddlOpertaionClass = DESC_TO_OPARATION.get(ddlDesc.getClass());
         Constructor<? extends DDLOperation> constructor =
             ddlOpertaionClass.getConstructor(DDLOperationContext.class, ddlDesc.getClass());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
new file mode 100644
index 0000000..c3d5f90
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hive.common.util.HiveStringUtils;
+import org.apache.hive.common.util.ReflectionUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Utilities used by some DDLOperations.
+ */
+public final class DDLUtils {
+  private static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.DDLTask");
+
+  private DDLUtils() {
+    throw new UnsupportedOperationException("DDLUtils should not be instantiated");
+  }
+
+  public static DataOutputStream getOutputStream(Path outputFile, DDLOperationContext context) throws HiveException {
+    try {
+      FileSystem fs = outputFile.getFileSystem(context.getConf());
+      return fs.create(outputFile);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
+  /**
+   * There are many places where "duplicate" Read/WriteEnity objects are added.  The way this was
+   * initially implemented, the duplicate just replaced the previous object.
+   * (work.getOutputs() is a Set and WriteEntity#equals() relies on name)
+   * This may be benign for ReadEntity and perhaps was benign for WriteEntity before WriteType was
+   * added. Now that WriteEntity has a WriteType it replaces it with one with possibly different
+   * {@link org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType}. It's hard to imagine
+   * how this is desirable.
+   *
+   * As of HIVE-14993, WriteEntity with different WriteType must be considered different.
+   * So WriteEntity created in DDLTask cause extra output in golden files, but only because
+   * DDLTask sets a different WriteType for the same Entity.
+   *
+   * In the spirit of bug-for-bug compatibility, this method ensures we only add new
+   * WriteEntity if it's really new.
+   *
+   * @return {@code true} if item was added
+   */
+  public static boolean addIfAbsentByName(WriteEntity newWriteEntity, Set<WriteEntity> outputs) {
+    for(WriteEntity writeEntity : outputs) {
+      if(writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) {
+        LOG.debug("Ignoring request to add {} because {} is present", newWriteEntity.toStringDetail(),
+            writeEntity.toStringDetail());
+        return false;
+      }
+    }
+    outputs.add(newWriteEntity);
+    return true;
+  }
+
+  public static boolean addIfAbsentByName(WriteEntity newWriteEntity, DDLOperationContext context) {
+    return addIfAbsentByName(newWriteEntity, context.getWork().getOutputs());
+  }
+
+  /**
+   * Check if the given serde is valid.
+   */
+  public static void validateSerDe(String serdeName, DDLOperationContext context) throws HiveException {
+    validateSerDe(serdeName, context.getConf());
+  }
+
+  public static void validateSerDe(String serdeName, HiveConf conf) throws HiveException {
+    try {
+      Deserializer d = ReflectionUtil.newInstance(conf.getClassByName(serdeName).
+          asSubclass(Deserializer.class), conf);
+      if (d != null) {
+        LOG.debug("Found class for {}", serdeName);
+      }
+    } catch (Exception e) {
+      throw new HiveException("Cannot validate serde: " + serdeName, e);
+    }
+  }
+
+  /**
+   * Validate if the given table/partition is eligible for update.
+   *
+   * @param db Database.
+   * @param tableName Table name of format db.table
+   * @param partSpec Partition spec for the partition
+   * @param replicationSpec Replications specification
+   *
+   * @return boolean true if allow the operation
+   * @throws HiveException
+   */
+  public static boolean allowOperationInReplicationScope(Hive db, String tableName, Map<String, String> partSpec,
+      ReplicationSpec replicationSpec) throws HiveException {
+    if ((null == replicationSpec) || (!replicationSpec.isInReplicationScope())) {
+      // Always allow the operation if it is not in replication scope.
+      return true;
+    }
+    // If the table/partition exist and is older than the event, then just apply the event else noop.
+    Table existingTable = db.getTable(tableName, false);
+    if ((existingTable != null) && replicationSpec.allowEventReplacementInto(existingTable.getParameters())) {
+      // Table exists and is older than the update. Now, need to ensure if update allowed on the partition.
+      if (partSpec != null) {
+        Partition existingPtn = db.getPartition(existingTable, partSpec, false);
+        return ((existingPtn != null) && replicationSpec.allowEventReplacementInto(existingPtn.getParameters()));
+      }
+
+      // Replacement is allowed as the existing table is older than event
+      return true;
+    }
+
+    // The table is missing either due to drop/rename which follows the operation.
+    // Or the existing table is newer than our update. So, don't allow the update.
+    return false;
+  }
+
+  public static String propertiesToString(Map<String, String> props, List<String> exclude) {
+    if (props.isEmpty()) {
+      return "";
+    }
+
+    Map<String, String> sortedProperties = new TreeMap<String, String>(props);
+    List<String> realProps = new ArrayList<String>();
+    for (Map.Entry<String, String> e : sortedProperties.entrySet()) {
+      if (e.getValue() != null && (exclude == null || !exclude.contains(e.getKey()))) {
+        realProps.add("  '" + e.getKey() + "'='" + HiveStringUtils.escapeHiveCommand(e.getValue()) + "'");
+      }
+    }
+    return StringUtils.join(realProps, ", \n");
+  }
+
+  public static void writeToFile(String data, String file, DDLOperationContext context) throws IOException {
+    if (StringUtils.isEmpty(data)) {
+      return;
+    }
+
+    Path resFile = new Path(file);
+    FileSystem fs = resFile.getFileSystem(context.getConf());
+    try (FSDataOutputStream out = fs.create(resFile);
+         OutputStreamWriter writer = new OutputStreamWriter(out, "UTF-8")) {
+      writer.write(data);
+      writer.write((char) Utilities.newLineCode);
+      writer.flush();
+    }
+  }
+
+  public static void appendNonNull(StringBuilder builder, Object value) {
+    appendNonNull(builder, value, false);
+  }
+
+  public static void appendNonNull(StringBuilder builder, Object value, boolean firstColumn) {
+    if (!firstColumn) {
+      builder.append((char)Utilities.tabCode);
+    } else if (builder.length() > 0) {
+      builder.append((char)Utilities.newLineCode);
+    }
+    if (value != null) {
+      builder.append(value);
+    }
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java
index d2fbe8f..a2f49b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hive.ql.ddl;
 
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 import java.io.Serializable;
 
@@ -67,6 +69,7 @@ public final class DDLWork2 implements Serializable {
     this.needLock = needLock;
   }
 
+  @Explain(skipHeader = true, explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public DDLDesc getDDLDesc() {
     return ddlDesc;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java
index efaf389..801ac62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/DescDatabaseOperation.java
@@ -19,16 +19,16 @@
 package org.apache.hadoop.hive.ql.ddl.database;
 
 import java.io.DataOutputStream;
-import java.util.Map;
+import java.util.SortedMap;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 /**
@@ -44,21 +44,15 @@ public class DescDatabaseOperation extends DDLOperation {
 
   @Override
   public int execute() throws HiveException {
-    try (DataOutputStream outStream = getOutputStream(new Path(desc.getResFile()))) {
+    try (DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) {
       Database database = context.getDb().getDatabase(desc.getDatabaseName());
       if (database == null) {
         throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, desc.getDatabaseName());
       }
 
-      Map<String, String> params = null;
+      SortedMap<String, String> params = null;
       if (desc.isExt()) {
-        params = database.getParameters();
-      }
-
-      // If this is a q-test, let's order the params map (lexicographically) by
-      // key. This is to get consistent param ordering between Java7 and Java8.
-      if (HiveConf.getBoolVar(context.getConf(), HiveConf.ConfVars.HIVE_IN_TEST) && params != null) {
-        params = new TreeMap<String, String>(params);
+        params = new TreeMap<>(database.getParameters());
       }
 
       String location = database.getLocationUri();
@@ -66,9 +60,8 @@ public class DescDatabaseOperation extends DDLOperation {
         location = "location/in/test";
       }
 
-      PrincipalType ownerType = database.getOwnerType();
       context.getFormatter().showDatabaseDescription(outStream, database.getName(), database.getDescription(),
-          location, database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params);
+          location, database.getOwnerName(), database.getOwnerType(), params);
     } catch (Exception e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java
similarity index 53%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java
index ba5d06e..29dc266 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java
@@ -16,79 +16,47 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.database;
 
 import java.io.Serializable;
 
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 
 /**
- * ShowCreateDatabaseDesc.
- *
+ * DDL task description for SHOW CREATE DATABASE commands.
  */
-@Explain(displayName = "Show Create Database",
-    explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class ShowCreateDatabaseDesc extends DDLDesc implements Serializable {
+@Explain(displayName = "Show Create Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class ShowCreateDatabaseDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
-  String resFile;
-  String dbName;
-
-  /**
-   * thrift ddl for the result of showcreatedatabase.
-   */
-  private static final String schema = "createdb_stmt#string";
 
-  public String getSchema() {
-    return schema;
+  static {
+    DDLTask2.registerOperation(ShowCreateDatabaseDesc.class, ShowCreateDatabaseOperation.class);
   }
 
-  /**
-   * For serialization use only.
-   */
-  public ShowCreateDatabaseDesc() {
-  }
+  private final String resFile;
+  private final String dbName;
 
   /**
-   * @param resFile
-   * @param dbName
-   *          name of database to show
+   * Thrift ddl for the result of showcreatedatabase.
    */
+  public static final String SCHEMA = "createdb_stmt#string";
+
   public ShowCreateDatabaseDesc(String dbName, String resFile) {
     this.dbName = dbName;
     this.resFile = resFile;
   }
 
-  /**
-   * @return the resFile
-   */
   @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
   public String getResFile() {
     return resFile;
   }
 
-  /**
-   * @param resFile
-   *          the resFile to set
-   */
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
-  }
-
-  /**
-   * @return the databaseName
-   */
-  @Explain(displayName = "database name",
-      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getDatabaseName() {
     return dbName;
   }
-
-  /**
-   * @param dbName
-   *          the dbName to set
-   */
-  public void setDatabaseName(String dbName) {
-    this.dbName = dbName;
-  }
 }
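
The rename above also converts ShowCreateDatabaseDesc to the pattern used throughout this commit: an immutable descriptor that registers its operation class in a static initializer. A condensed, hypothetical Desc/Operation pair following that pattern is sketched below; ShowWidgetsDesc and ShowWidgetsOperation are invented names, and the registerOperation and DDLOperation signatures are assumed to be exactly as they appear elsewhere in this diff:

    import java.io.Serializable;

    import org.apache.hadoop.hive.ql.ddl.DDLDesc;
    import org.apache.hadoop.hive.ql.ddl.DDLOperation;
    import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
    import org.apache.hadoop.hive.ql.ddl.DDLTask2;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    /** Hypothetical descriptor: immutable state plus a registration block. */
    class ShowWidgetsDesc implements DDLDesc, Serializable {
      private static final long serialVersionUID = 1L;

      static {
        DDLTask2.registerOperation(ShowWidgetsDesc.class, ShowWidgetsOperation.class);
      }

      private final String resFile;

      ShowWidgetsDesc(String resFile) {
        this.resFile = resFile;
      }

      String getResFile() {
        return resFile;
      }
    }

    /** Hypothetical operation: receives the context and its descriptor, does the work in execute(). */
    class ShowWidgetsOperation extends DDLOperation {
      private final ShowWidgetsDesc desc;

      ShowWidgetsOperation(DDLOperationContext context, ShowWidgetsDesc desc) {
        super(context);
        this.desc = desc;
      }

      @Override
      public int execute() throws HiveException {
        // a real operation would write its result to desc.getResFile(),
        // e.g. via DDLUtils.getOutputStream(new Path(desc.getResFile()), context)
        return 0;
      }
    }
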
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java
new file mode 100644
index 0000000..100ac95
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.database;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+
+import java.io.DataOutputStream;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hive.common.util.HiveStringUtils;
+
+/**
+ * Operation process of showing the creation of a database.
+ */
+public class ShowCreateDatabaseOperation extends DDLOperation {
+  private final ShowCreateDatabaseDesc desc;
+
+  public ShowCreateDatabaseOperation(DDLOperationContext context, ShowCreateDatabaseDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context);
+    try {
+      return showCreateDatabase(outStream);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    } finally {
+      IOUtils.closeStream(outStream);
+    }
+  }
+
+  private int showCreateDatabase(DataOutputStream outStream) throws Exception {
+    Database database = context.getDb().getDatabase(desc.getDatabaseName());
+
+    StringBuilder createDbCommand = new StringBuilder();
+    createDbCommand.append("CREATE DATABASE `").append(database.getName()).append("`\n");
+    if (database.getDescription() != null) {
+      createDbCommand.append("COMMENT\n  '");
+      createDbCommand.append(HiveStringUtils.escapeHiveCommand(database.getDescription())).append("'\n");
+    }
+    createDbCommand.append("LOCATION\n  '");
+    createDbCommand.append(database.getLocationUri()).append("'\n");
+    String propertiesToString = DDLUtils.propertiesToString(database.getParameters(), null);
+    if (!propertiesToString.isEmpty()) {
+      createDbCommand.append("WITH DBPROPERTIES (\n");
+      createDbCommand.append(propertiesToString).append(")\n");
+    }
+
+    outStream.write(createDbCommand.toString().getBytes("UTF-8"));
+    return 0;
+  }
+}
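
For a concrete picture of what showCreateDatabase emits: given a hypothetical database named sales with a comment and no extra DBPROPERTIES (the location URI is invented), the statement written to the result file would look roughly like this:

    CREATE DATABASE `sales`
    COMMENT
      'sales data warehouse'
    LOCATION
      'hdfs://namenode:8020/warehouse/sales.db'

If the database carries parameters, a WITH DBPROPERTIES clause is appended using DDLUtils.propertiesToString.
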
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java
index 30c4db8..476762f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowDatabasesOperation.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.io.IOUtils;
 
@@ -53,7 +54,7 @@ public class ShowDatabasesOperation extends DDLOperation {
     LOG.info("Found {} database(s) matching the SHOW DATABASES statement.", databases.size());
 
     // write the results in the file
-    DataOutputStream outStream = getOutputStream(new Path(desc.getResFile()));
+    DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context);
     try {
       context.getFormatter().showDatabases(outStream, databases);
     } catch (Exception e) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java
similarity index 96%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java
index 4514af1..15fe4a9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -43,8 +43,10 @@ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -54,6 +56,10 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ParseUtils;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
+import org.apache.hadoop.hive.ql.plan.ValidationUtility;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
@@ -62,15 +68,18 @@ import org.apache.hadoop.mapred.OutputFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
- * CreateTableDesc.
- *
+ * DDL task description for CREATE TABLE commands.
  */
 @Explain(displayName = "Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class CreateTableDesc extends DDLDesc implements Serializable {
+public class CreateTableDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
-  private static Logger LOG = LoggerFactory.getLogger(CreateTableDesc.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CreateTableDesc.class);
+
+  static {
+    DDLTask2.registerOperation(CreateTableDesc.class, CreateTableOperation.class);
+  }
+
   String databaseName;
   String tableName;
   boolean isExternal;
@@ -303,7 +312,9 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
     return defaultConstraints;
   }
 
-  public List<SQLCheckConstraint> getCheckConstraints() { return checkConstraints; }
+  public List<SQLCheckConstraint> getCheckConstraints() {
+    return checkConstraints;
+  }
 
   @Explain(displayName = "bucket columns")
   public List<String> getBucketCols() {
@@ -536,13 +547,10 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
 
     if (this.getStorageHandler() == null) {
       try {
-        Class<?> origin = Class.forName(this.getOutputFormat(), true,
-          Utilities.getSessionSpecifiedClassLoader());
-        Class<? extends OutputFormat> replaced = HiveFileFormatUtils
-          .getOutputFormatSubstitute(origin);
+        Class<?> origin = Class.forName(this.getOutputFormat(), true, Utilities.getSessionSpecifiedClassLoader());
+        Class<? extends OutputFormat> replaced = HiveFileFormatUtils.getOutputFormatSubstitute(origin);
         if (!HiveOutputFormat.class.isAssignableFrom(replaced)) {
-          throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE
-            .getMsg());
+          throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg());
         }
       } catch (ClassNotFoundException e) {
         throw new SemanticException(ErrorMsg.CLASSPATH_ERROR.getMsg(), e);
@@ -766,7 +774,7 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
     } else {
       // let's validate that the serde exists
       serDeClassName = getSerName();
-      DDLTask.validateSerDe(serDeClassName, conf);
+      DDLUtils.validateSerDe(serDeClassName, conf);
     }
     tbl.setSerializationLib(serDeClassName);
 
@@ -838,9 +846,9 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
       tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName());
     }
 
-    if (DDLTask.doesTableNeedLocation(tbl)) {
+    if (CreateTableOperation.doesTableNeedLocation(tbl)) {
       // If location is specified - ensure that it is a full qualified name
-      DDLTask.makeLocationQualified(tbl.getDbName(), tbl, conf);
+      CreateTableOperation.makeLocationQualified(tbl, conf);
     }
 
     if (isExternal()) {
@@ -925,8 +933,6 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
     return initialMmWriteId;
   }
 
-  
-
   public FileSinkDesc getAndUnsetWriter() {
     FileSinkDesc fsd = writer;
     writer = null;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java
similarity index 52%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java
index 2cc0712..6652b79 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java
@@ -16,47 +16,50 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
 import java.util.Map;
 
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 /**
- * CreateTableLikeDesc.
- *
+ * DDL task description for CREATE TABLE LIKE commands.
  */
 @Explain(displayName = "Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class CreateTableLikeDesc extends DDLDesc implements Serializable {
+public class CreateTableLikeDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
-  String tableName;
-  boolean isExternal;
-  String defaultInputFormat;
-  String defaultOutputFormat;
-  String defaultSerName;
-  Map<String, String> defaultSerdeProps;
-  String location;
-  Map<String, String> tblProps;
-  boolean ifNotExists;
-  String likeTableName;
-  boolean isTemporary = false;
-  boolean isUserStorageFormat = false;
-
-  public CreateTableLikeDesc() {
-  }
 
-  public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary,
-      String defaultInputFormat, String defaultOutputFormat, String location,
-      String defaultSerName, Map<String, String> defaultSerdeProps, Map<String, String> tblProps,
-      boolean ifNotExists, String likeTableName, boolean isUserStorageFormat) {
+  static {
+    DDLTask2.registerOperation(CreateTableLikeDesc.class, CreateTableLikeOperation.class);
+  }
+
+  private final String tableName;
+  private final boolean isExternal;
+  private final boolean isTemporary;
+  private final String defaultInputFormat;
+  private final String defaultOutputFormat;
+  private final String location;
+  private final String defaultSerName;
+  private final Map<String, String> defaultSerdeProps;
+  private final Map<String, String> tblProps;
+  private final boolean ifNotExists;
+  private final String likeTableName;
+  private final boolean isUserStorageFormat;
+
+  public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary, String defaultInputFormat,
+      String defaultOutputFormat, String location, String defaultSerName, Map<String, String> defaultSerdeProps,
+      Map<String, String> tblProps, boolean ifNotExists, String likeTableName, boolean isUserStorageFormat) {
     this.tableName = tableName;
     this.isExternal = isExternal;
     this.isTemporary = isTemporary;
-    this.defaultInputFormat=defaultInputFormat;
-    this.defaultOutputFormat=defaultOutputFormat;
-    this.defaultSerName=defaultSerName;
-    this.defaultSerdeProps=defaultSerdeProps;
+    this.defaultInputFormat = defaultInputFormat;
+    this.defaultOutputFormat = defaultOutputFormat;
+    this.defaultSerName = defaultSerName;
+    this.defaultSerdeProps = defaultSerdeProps;
     this.location = location;
     this.tblProps = tblProps;
     this.ifNotExists = ifNotExists;
@@ -69,131 +72,56 @@ public class CreateTableLikeDesc extends DDLDesc implements Serializable {
     return ifNotExists;
   }
 
-  public void setIfNotExists(boolean ifNotExists) {
-    this.ifNotExists = ifNotExists;
-  }
-
   @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getTableName() {
     return tableName;
   }
 
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
   @Explain(displayName = "default input format")
   public String getDefaultInputFormat() {
     return defaultInputFormat;
   }
 
-  public void setInputFormat(String inputFormat) {
-    this.defaultInputFormat = inputFormat;
-  }
-
   @Explain(displayName = "default output format")
   public String getDefaultOutputFormat() {
     return defaultOutputFormat;
   }
 
-  public void setOutputFormat(String outputFormat) {
-    this.defaultOutputFormat = outputFormat;
-  }
-
   @Explain(displayName = "location")
   public String getLocation() {
     return location;
   }
 
-  public void setLocation(String location) {
-    this.location = location;
-  }
-
   @Explain(displayName = "isExternal", displayOnlyOnTrue = true)
   public boolean isExternal() {
     return isExternal;
   }
 
-  public void setExternal(boolean isExternal) {
-    this.isExternal = isExternal;
-  }
-
-  /**
-   * @return the default serDeName
-   */
   @Explain(displayName = "default serde name")
   public String getDefaultSerName() {
     return defaultSerName;
   }
 
-  /**
-   * @param serName
-   *          the serName to set
-   */
-  public void setDefaultSerName(String serName) {
-    this.defaultSerName = serName;
-  }
-
-  /**
-   * @return the default serDe properties
-   */
   @Explain(displayName = "serde properties")
   public Map<String, String> getDefaultSerdeProps() {
     return defaultSerdeProps;
   }
 
-  /**
-   * @param serdeProps
-   *          the default serde properties to set
-   */
-  public void setDefaultSerdeProps(Map<String, String> serdeProps) {
-    this.defaultSerdeProps = serdeProps;
-  }
-
   @Explain(displayName = "like")
   public String getLikeTableName() {
     return likeTableName;
   }
 
-  public void setLikeTableName(String likeTableName) {
-    this.likeTableName = likeTableName;
-  }
-
-  /**
-   * @return the table properties
-   */
   @Explain(displayName = "table properties")
   public Map<String, String> getTblProps() {
     return tblProps;
   }
 
-  /**
-   * @param tblProps
-   *          the table properties to set
-   */
-  public void setTblProps(Map<String, String> tblProps) {
-    this.tblProps = tblProps;
-  }
-
-  /**
-   * @return the isTemporary
-   */
   @Explain(displayName = "isTemporary", displayOnlyOnTrue = true)
   public boolean isTemporary() {
     return isTemporary;
   }
 
-  /**
-   * @param isTemporary table is Temporary or not.
-   */
-  public void setTemporary(boolean isTemporary) {
-    this.isTemporary = isTemporary;
-  }
-
-  /**
-   * True if user has specified storage format in query
-   * @return boolean
-   */
   public boolean isUserStorageFormat() {
     return this.isUserStorageFormat;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java
new file mode 100644
index 0000000..6ac6b10
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.PartitionManagementTask;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.serde2.SerDeSpec;
+import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
+import org.apache.hive.common.util.AnnotationUtils;
+
+/**
+ * Operation process of creating a table like an existing one.
+ */
+public class CreateTableLikeOperation extends DDLOperation {
+  private final CreateTableLikeDesc desc;
+
+  public CreateTableLikeOperation(DDLOperationContext context, CreateTableLikeDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    // Get the existing table
+    Table oldtbl = context.getDb().getTable(desc.getLikeTableName());
+    Table tbl;
+    if (oldtbl.getTableType() == TableType.VIRTUAL_VIEW || oldtbl.getTableType() == TableType.MATERIALIZED_VIEW) {
+      tbl = createViewLikeTable(oldtbl);
+    } else {
+      tbl = createTableLikeTable(oldtbl);
+    }
+
+    // If location is specified - ensure that it is a fully qualified name
+    if (CreateTableOperation.doesTableNeedLocation(tbl)) {
+      CreateTableOperation.makeLocationQualified(tbl, context.getConf());
+    }
+
+    if (desc.getLocation() == null && !tbl.isPartitioned() &&
+        context.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+      StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(),
+          MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE);
+    }
+
+    // create the table
+    context.getDb().createTable(tbl, desc.getIfNotExists());
+    DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context);
+    return 0;
+  }
+
+  private Table createViewLikeTable(Table oldtbl) throws HiveException {
+    Table tbl;
+    String targetTableName = desc.getTableName();
+    tbl = context.getDb().newTable(targetTableName);
+
+    if (desc.getTblProps() != null) {
+      tbl.getTTable().getParameters().putAll(desc.getTblProps());
+    }
+
+    tbl.setTableType(TableType.MANAGED_TABLE);
+
+    if (desc.isExternal()) {
+      tbl.setProperty("EXTERNAL", "TRUE");
+      tbl.setTableType(TableType.EXTERNAL_TABLE);
+      // partition discovery is on by default
+      tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true");
+    }
+
+    tbl.setFields(oldtbl.getCols());
+    tbl.setPartCols(oldtbl.getPartCols());
+
+    if (desc.getDefaultSerName() == null) {
+      LOG.info("Default to LazySimpleSerDe for table {}", targetTableName);
+      tbl.setSerializationLib(LazySimpleSerDe.class.getName());
+    } else {
+      // let's validate that the serde exists
+      DDLUtils.validateSerDe(desc.getDefaultSerName(), context);
+      tbl.setSerializationLib(desc.getDefaultSerName());
+    }
+
+    if (desc.getDefaultSerdeProps() != null) {
+      for (Map.Entry<String, String> e : desc.getDefaultSerdeProps().entrySet()) {
+        tbl.setSerdeParam(e.getKey(), e.getValue());
+      }
+    }
+
+    tbl.setInputFormatClass(desc.getDefaultInputFormat());
+    tbl.setOutputFormatClass(desc.getDefaultOutputFormat());
+    tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName());
+    tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName());
+
+    return tbl;
+  }
+
+  private Table createTableLikeTable(Table oldtbl) throws SemanticException, HiveException {
+    Table tbl = oldtbl;
+
+    // find out database name and table name of target table
+    String targetTableName = desc.getTableName();
+    String[] names = Utilities.getDbTableName(targetTableName);
+
+    tbl.setDbName(names[0]);
+    tbl.setTableName(names[1]);
+
+    // using old table object, hence reset the owner to current user for new table.
+    tbl.setOwner(SessionState.getUserFromAuthenticator());
+
+    if (desc.getLocation() != null) {
+      tbl.setDataLocation(new Path(desc.getLocation()));
+    } else {
+      tbl.unsetDataLocation();
+    }
+
+    Class<? extends Deserializer> serdeClass;
+    try {
+      serdeClass = oldtbl.getDeserializerClass();
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+    // We should copy only those table parameters that are specified in the config.
+    SerDeSpec spec = AnnotationUtils.getAnnotation(serdeClass, SerDeSpec.class);
+
+    Set<String> retainer = new HashSet<String>();
+    // for non-native table, property storage_handler should be retained
+    retainer.add(META_TABLE_STORAGE);
+    if (spec != null && spec.schemaProps() != null) {
+      retainer.addAll(Arrays.asList(spec.schemaProps()));
+    }
+
+    String paramsStr = HiveConf.getVar(context.getConf(), HiveConf.ConfVars.DDL_CTL_PARAMETERS_WHITELIST);
+    if (paramsStr != null) {
+      retainer.addAll(Arrays.asList(paramsStr.split(",")));
+    }
+
+    Map<String, String> params = tbl.getParameters();
+    if (!retainer.isEmpty()) {
+      params.keySet().retainAll(retainer);
+    } else {
+      params.clear();
+    }
+
+    if (desc.getTblProps() != null) {
+      params.putAll(desc.getTblProps());
+    }
+
+    if (desc.isUserStorageFormat()) {
+      tbl.setInputFormatClass(desc.getDefaultInputFormat());
+      tbl.setOutputFormatClass(desc.getDefaultOutputFormat());
+      tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName());
+      tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName());
+      if (desc.getDefaultSerName() == null) {
+        LOG.info("Default to LazySimpleSerDe for like table {}", targetTableName);
+        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
+      } else {
+        // let's validate that the serde exists
+        DDLUtils.validateSerDe(desc.getDefaultSerName(), context);
+        tbl.setSerializationLib(desc.getDefaultSerName());
+      }
+    }
+
+    tbl.getTTable().setTemporary(desc.isTemporary());
+    tbl.getTTable().unsetId();
+
+    if (desc.isExternal()) {
+      tbl.setProperty("EXTERNAL", "TRUE");
+      tbl.setTableType(TableType.EXTERNAL_TABLE);
+      // partition discovery is on by default
+      tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true");
+    } else {
+      tbl.getParameters().remove("EXTERNAL");
+    }
+
+    return tbl;
+  }
+}
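
The parameter-copying block in createTableLikeTable keeps only an explicit whitelist: the storage handler key, the serde's declared schema properties, and anything listed via the DDL_CTL_PARAMETERS_WHITELIST setting. A minimal, plain-collections sketch of that retainAll idiom (the keys and values are made up for illustration):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class RetainerSketch {
      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("storage_handler", "SomeHandler"); // stand-in for META_TABLE_STORAGE
        params.put("numRows", "42");                  // stats inherited from the source table
        params.put("custom.prop", "x");

        Set<String> retainer = new HashSet<>();
        retainer.add("storage_handler");              // always retained for non-native tables
        retainer.add("custom.prop");                  // e.g. whitelisted via configuration

        // same idea as CreateTableLikeOperation: drop everything not explicitly retained
        params.keySet().retainAll(retainer);
        System.out.println(params);                   // numRows is gone, the retained keys remain
      }
    }
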
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java
new file mode 100644
index 0000000..af39c16
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.Constants;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+
+/**
+ * Operation process of creating a table.
+ */
+public class CreateTableOperation extends DDLOperation {
+  private final CreateTableDesc desc;
+
+  public CreateTableOperation(DDLOperationContext context, CreateTableDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    // create the table
+    Table tbl = desc.toTable(context.getConf());
+    LOG.debug("creating table {} on {}", tbl.getFullyQualifiedName(), tbl.getDataLocation());
+
+    if (desc.getReplicationSpec().isInReplicationScope() && (!desc.getReplaceMode())){
+      // if this is a replication spec, then replace-mode semantics might apply.
+      // if we're already asking for a table replacement, then we can skip this check.
+      // however, otherwise, if in replication scope, and we've not been explicitly asked
+      // to replace, we should check if the object we're looking at exists, and if so,
+      // trigger replace-mode semantics.
+      Table existingTable = context.getDb().getTable(tbl.getDbName(), tbl.getTableName(), false);
+      if (existingTable != null){
+        if (desc.getReplicationSpec().allowEventReplacementInto(existingTable.getParameters())) {
+          desc.setReplaceMode(true); // we replace existing table.
+          ReplicationSpec.copyLastReplId(existingTable.getParameters(), tbl.getParameters());
+        } else {
+          LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update", desc.getTableName());
+          return 0; // no replacement, the existing table state is newer than our update.
+        }
+      }
+    }
+
+    // create the table
+    if (desc.getReplaceMode()) {
+      createTableReplaceMode(tbl);
+    } else {
+      createTableNonReplaceMode(tbl);
+    }
+
+    DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context);
+    return 0;
+  }
+
+  private void createTableReplaceMode(Table tbl) throws HiveException {
+    ReplicationSpec replicationSpec = desc.getReplicationSpec();
+    long writeId = 0;
+    EnvironmentContext environmentContext = null;
+    if (replicationSpec != null && replicationSpec.isInReplicationScope()) {
+      if (replicationSpec.isMigratingToTxnTable()) {
+        // for migration we start the transaction and allocate the write id in the repl txn task.
+        String writeIdPara = context.getConf().get(ReplUtils.REPL_CURRENT_TBL_WRITE_ID);
+        if (writeIdPara == null) {
+          throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration");
+        }
+        writeId = Long.parseLong(writeIdPara);
+      } else {
+        writeId = desc.getReplWriteId();
+      }
+
+      // In case of replication, statistics are obtained from the source, so do not update those
+      // on the replica. Since we are not replicating statistics for transactional tables, do not do
+      // so for transactional tables right now.
+      if (!AcidUtils.isTransactionalTable(desc)) {
+        environmentContext = new EnvironmentContext();
+        environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
+      }
+    }
+
+    // replace-mode creates are really alters using CreateTableDesc.
+    context.getDb().alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, false, environmentContext,
+        true, writeId);
+  }
+
+  private void createTableNonReplaceMode(Table tbl) throws HiveException {
+    if (CollectionUtils.isNotEmpty(desc.getPrimaryKeys()) ||
+        CollectionUtils.isNotEmpty(desc.getForeignKeys()) ||
+        CollectionUtils.isNotEmpty(desc.getUniqueConstraints()) ||
+        CollectionUtils.isNotEmpty(desc.getNotNullConstraints()) ||
+        CollectionUtils.isNotEmpty(desc.getDefaultConstraints()) ||
+        CollectionUtils.isNotEmpty(desc.getCheckConstraints())) {
+      context.getDb().createTable(tbl, desc.getIfNotExists(), desc.getPrimaryKeys(), desc.getForeignKeys(),
+          desc.getUniqueConstraints(), desc.getNotNullConstraints(), desc.getDefaultConstraints(),
+          desc.getCheckConstraints());
+    } else {
+      context.getDb().createTable(tbl, desc.getIfNotExists());
+    }
+
+    if (desc.isCTAS()) {
+      Table createdTable = context.getDb().getTable(tbl.getDbName(), tbl.getTableName());
+      DataContainer dc = new DataContainer(createdTable.getTTable());
+      context.getQueryState().getLineageState().setLineage(createdTable.getPath(), dc, createdTable.getCols());
+    }
+  }
+
+  public static boolean doesTableNeedLocation(Table tbl) {
+    // TODO: If we are ok with breaking compatibility of existing 3rd party StorageHandlers,
+    // this method could be moved to the HiveStorageHandler interface.
+    boolean retval = true;
+    if (tbl.getStorageHandler() != null) {
+      // TODO: why doesn't this check class name rather than toString?
+      String sh = tbl.getStorageHandler().toString();
+      retval = !"org.apache.hadoop.hive.hbase.HBaseStorageHandler".equals(sh) &&
+          !Constants.DRUID_HIVE_STORAGE_HANDLER_ID.equals(sh) &&
+          !Constants.JDBC_HIVE_STORAGE_HANDLER_ID.equals(sh) &&
+          !"org.apache.hadoop.hive.accumulo.AccumuloStorageHandler".equals(sh);
+    }
+    return retval;
+  }
+
+  public static void makeLocationQualified(Table table, HiveConf conf) throws HiveException {
+    StorageDescriptor sd = table.getTTable().getSd();
+    // If the table's location is currently unset, it is left unset, allowing the metastore to
+    // fill in the table's location.
+    // Note that the previous logic for some reason would make a special case if the DB was the
+    // default database, and actually attempt to generate a location.
+    // This seems incorrect and unnecessary, since the metastore is just as able to fill in the
+    // default table location in the case of the default DB, as it is for non-default DBs.
+    Path path = null;
+    if (sd.isSetLocation()) {
+      path = new Path(sd.getLocation());
+    }
+    if (path != null) {
+      sd.setLocation(Utilities.getQualifiedPath(conf, path));
+    }
+  }
+}
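
makeLocationQualified above leaves an unset location alone and otherwise runs the path through Utilities.getQualifiedPath. The sketch below only illustrates what "qualifying" a location means in general, using plain Hadoop filesystem APIs rather than Hive's helper (the path is made up; with the default local configuration the output would be a file: URI):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QualifyLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();       // fs.defaultFS decides scheme and authority
        FileSystem fs = FileSystem.get(conf);

        Path unqualified = new Path("/warehouse/demo.db/t1");
        Path qualified = fs.makeQualified(unqualified); // e.g. file:/warehouse/demo.db/t1 locally
        System.out.println(qualified);
      }
    }
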
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java
similarity index 52%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java
index ee50232..0cfffd2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java
@@ -16,166 +16,85 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
 import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 
 /**
- * DescTableDesc.
- *
+ * DDL task description for DESC table_name commands.
  */
 @Explain(displayName = "Describe Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class DescTableDesc extends DDLDesc implements Serializable {
-  public void setPartSpec(Map<String, String> partSpec) {
-    this.partSpec = partSpec;
-  }
-
+public class DescTableDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
 
-  String tableName;
-  Map<String, String> partSpec;
-  String resFile;
-
-  String colPath;
-  boolean isExt;
-  boolean isFormatted;
-
-  /**
-   * table name for the result of describe table.
-   */
-  private static final String table = "describe";
-  /**
-   * thrift ddl for the result of describe table.
-   */
-  private static final String schema = "col_name,data_type,comment#string:string:string";
-  private static final String colStatsSchema = "col_name,data_type,min,max,num_nulls,"
-      + "distinct_count,avg_col_len,max_col_len,num_trues,num_falses,bitVector,comment"
-      + "#string:string:string:string:string:string:string:string:string:string:string:string";
-
-  public DescTableDesc() {
+  static {
+    DDLTask2.registerOperation(DescTableDesc.class, DescTableOperation.class);
   }
 
-  /**
-   * @param partSpec
-   * @param resFile
-   * @param tableName
-   */
-  public DescTableDesc(Path resFile, String tableName,
-      Map<String, String> partSpec, String colPath) {
-    this.isExt = false;
-    this.isFormatted = false;
-    this.partSpec = partSpec;
+  private final String resFile;
+  private final String tableName;
+  private final Map<String, String> partSpec;
+  private final String colPath;
+  private final boolean isExt;
+  private final boolean isFormatted;
+
+  public DescTableDesc(Path resFile, String tableName, Map<String, String> partSpec, String colPath, boolean isExt,
+      boolean isFormatted) {
     this.resFile = resFile.toString();
     this.tableName = tableName;
+    this.partSpec = partSpec;
     this.colPath = colPath;
-  }
-
-  public String getTable() {
-    return table;
-  }
-
-  public static String getSchema(boolean colStats) {
-    if (colStats) {
-      return colStatsSchema;
-    }
-    return schema;
-  }
-
-  /**
-   * @return the isExt
-   */
-  public boolean isExt() {
-    return isExt;
-  }
-
-  /**
-   * @param isExt
-   *          the isExt to set
-   */
-  public void setExt(boolean isExt) {
     this.isExt = isExt;
+    this.isFormatted = isFormatted;
   }
 
-  /**
-   * @return the isFormatted
-   */
-  public boolean isFormatted() {
-    return isFormatted;
-  }
-
-  /**
-   * @param isFormat
-   *          the isFormat to set
-   */
-  public void setFormatted(boolean isFormat) {
-    this.isFormatted = isFormat;
+  @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
+  public String getResFile() {
+    return resFile;
   }
 
-  /**
-   * @return the tableName
-   */
   @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getTableName() {
     return tableName;
   }
 
-  /**
-   * @param tableName
-   *          the tableName to set
-   */
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  /**
-   * @param colPath
-   *          the colPath to set
-   */
-  public void setColPath(String colPath) {
-    this.colPath = colPath;
+  @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public Map<String, String> getPartSpec() {
+    return partSpec;
   }
 
-  /**
-   * @return the columnPath
-   */
   public String getColumnPath() {
     return colPath;
   }
 
-  /**
-   * @return the partSpec
-   */
-  @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public Map<String, String> getPartSpec() {
-    return partSpec;
+  public boolean isExt() {
+    return isExt;
   }
 
-  /**
-   * @param partSpec
-   *          the partSpec to set
-   */
-  public void setPartSpecs(Map<String, String> partSpec) {
-    this.partSpec = partSpec;
+  public boolean isFormatted() {
+    return isFormatted;
   }
 
   /**
-   * @return the resFile
+   * thrift ddl for the result of describe table.
    */
-  @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
-  public String getResFile() {
-    return resFile;
-  }
+  private static final String SCHEMA = "col_name,data_type,comment#string:string:string";
+  private static final String COL_STATS_SCHEMA = "col_name,data_type,min,max,num_nulls,"
+      + "distinct_count,avg_col_len,max_col_len,num_trues,num_falses,bitVector,comment"
+      + "#string:string:string:string:string:string:string:string:string:string:string:string";
 
-  /**
-   * @param resFile
-   *          the resFile to set
-   */
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
+  public static String getSchema(boolean colStats) {
+    if (colStats) {
+      return COL_STATS_SCHEMA;
+    }
+    return SCHEMA;
   }
 }
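
The SCHEMA and COL_STATS_SCHEMA constants above encode the result-set layout as column names before the '#' separator and column types after it. A small sketch, assuming nothing beyond the string format itself, of how such a schema string splits apart:

    public class SchemaStringSketch {
      public static void main(String[] args) {
        String schema = "col_name,data_type,comment#string:string:string";
        String[] parts = schema.split("#");
        String[] names = parts[0].split(",");
        String[] types = parts[1].split(":");
        for (int i = 0; i < names.length; i++) {
          System.out.println(names[i] + " : " + types[i]); // col_name : string, etc.
        }
      }
    }
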
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java
new file mode 100644
index 0000000..1d94ff3
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import java.io.DataOutputStream;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.StatObjectConverter;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.exec.ColumnInfo;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.CheckConstraint;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
+import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
+import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo;
+import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.UniqueConstraint;
+import org.apache.hadoop.hive.ql.plan.ColStatistics;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.stats.StatsUtils;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.AbstractSerDe;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * Operation process of describing a table.
+ */
+public class DescTableOperation extends DDLOperation {
+  private final DescTableDesc desc;
+
+  public DescTableOperation(DDLOperationContext context, DescTableDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws Exception {
+    String colPath = desc.getColumnPath();
+    String tableName = desc.getTableName();
+
+    // describe the table - populate the output stream
+    Table tbl = context.getDb().getTable(tableName, false);
+    if (tbl == null) {
+      throw new HiveException(ErrorMsg.INVALID_TABLE, tableName);
+    }
+    Partition part = null;
+    if (desc.getPartSpec() != null) {
+      part = context.getDb().getPartition(tbl, desc.getPartSpec(), false);
+      if (part == null) {
+        throw new HiveException(ErrorMsg.INVALID_PARTITION,
+            StringUtils.join(desc.getPartSpec().keySet(), ','), tableName);
+      }
+      tbl = part.getTable();
+    }
+
+    DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context);
+    try {
+      LOG.debug("DDLTask: got data for {}", tableName);
+
+      List<FieldSchema> cols = null;
+      List<ColumnStatisticsObj> colStats = null;
+
+      Deserializer deserializer = tbl.getDeserializer(true);
+      if (deserializer instanceof AbstractSerDe) {
+        String errorMsgs = ((AbstractSerDe) deserializer).getConfigurationErrors();
+        if (errorMsgs != null && !errorMsgs.isEmpty()) {
+          throw new SQLException(errorMsgs);
+        }
+      }
+
+      if (colPath.equals(tableName)) {
+        cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ?
+            tbl.getCols() : part.getCols();
+
+        if (!desc.isFormatted()) {
+          cols.addAll(tbl.getPartCols());
+        }
+
+        if (tbl.isPartitioned() && part == null) {
+          // No partition specified for the partitioned table, let's fetch all.
+          Map<String, String> tblProps = tbl.getParameters() == null ?
+              new HashMap<String, String>() : tbl.getParameters();
+          Map<String, Long> valueMap = new HashMap<>();
+          Map<String, Boolean> stateMap = new HashMap<>();
+          for (String stat : StatsSetupConst.SUPPORTED_STATS) {
+            valueMap.put(stat, 0L);
+            stateMap.put(stat, true);
+          }
+          PartitionIterable parts = new PartitionIterable(context.getDb(), tbl, null,
+              context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
+          int numParts = 0;
+          for (Partition partition : parts) {
+            Map<String, String> props = partition.getParameters();
+            Boolean state = StatsSetupConst.areBasicStatsUptoDate(props);
+            for (String stat : StatsSetupConst.SUPPORTED_STATS) {
+              stateMap.put(stat, stateMap.get(stat) && state);
+              if (props != null && props.get(stat) != null) {
+                valueMap.put(stat, valueMap.get(stat) + Long.parseLong(props.get(stat)));
+              }
+            }
+            numParts++;
+          }
+          for (String stat : StatsSetupConst.SUPPORTED_STATS) {
+            StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(stateMap.get(stat)));
+            tblProps.put(stat, valueMap.get(stat).toString());
+          }
+          tblProps.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(numParts));
+          tbl.setParameters(tblProps);
+        }
+      } else {
+        if (desc.isFormatted()) {
+          // when column name is specified in describe table DDL, colPath will
+          // be table_name.column_name
+          String colName = colPath.split("\\.")[1];
+          String[] dbTab = Utilities.getDbTableName(tableName);
+          List<String> colNames = new ArrayList<String>();
+          colNames.add(colName.toLowerCase());
+          if (null == part) {
+            if (tbl.isPartitioned()) {
+              Map<String, String> tblProps = tbl.getParameters() == null ?
+                  new HashMap<String, String>() : tbl.getParameters();
+              if (tbl.isPartitionKey(colNames.get(0))) {
+                FieldSchema partCol = tbl.getPartColByName(colNames.get(0));
+                cols = Collections.singletonList(partCol);
+                PartitionIterable parts = new PartitionIterable(context.getDb(), tbl, null,
+                    context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
+                ColumnInfo ci = new ColumnInfo(partCol.getName(),
+                    TypeInfoUtils.getTypeInfoFromTypeString(partCol.getType()), null, false);
+                ColStatistics cs = StatsUtils.getColStatsForPartCol(ci, parts, context.getConf());
+                ColumnStatisticsData data = new ColumnStatisticsData();
+                ColStatistics.Range r = cs.getRange();
+                StatObjectConverter.fillColumnStatisticsData(partCol.getType(), data, r == null ? null : r.minValue,
+                    r == null ? null : r.maxValue, r == null ? null : r.minValue, r == null ? null : r.maxValue,
+                    r == null ? null : r.minValue.toString(), r == null ? null : r.maxValue.toString(),
+                    cs.getNumNulls(), cs.getCountDistint(), null, cs.getAvgColLen(), cs.getAvgColLen(),
+                    cs.getNumTrues(), cs.getNumFalses());
+                ColumnStatisticsObj cso = new ColumnStatisticsObj(partCol.getName(), partCol.getType(), data);
+                colStats = Collections.singletonList(cso);
+                StatsSetupConst.setColumnStatsState(tblProps, colNames);
+              } else {
+                cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
+                List<String> parts = context.getDb().getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(),
+                    (short) -1);
+                AggrStats aggrStats = context.getDb().getAggrColStatsFor(
+                    dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false);
+                colStats = aggrStats.getColStats();
+                if (parts.size() == aggrStats.getPartsFound()) {
+                  StatsSetupConst.setColumnStatsState(tblProps, colNames);
+                } else {
+                  StatsSetupConst.removeColumnStatsState(tblProps, colNames);
+                }
+              }
+              tbl.setParameters(tblProps);
+            } else {
+              cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
+              colStats = context.getDb().getTableColumnStatistics(
+                  dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false);
+            }
+          } else {
+            List<String> partitions = new ArrayList<String>();
+            partitions.add(part.getName());
+            cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
+            colStats = context.getDb().getPartitionColumnStatistics(dbTab[0].toLowerCase(),
+                dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName());
+          }
+        } else {
+          cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
+        }
+      }
+      PrimaryKeyInfo pkInfo = null;
+      ForeignKeyInfo fkInfo = null;
+      UniqueConstraint ukInfo = null;
+      NotNullConstraint nnInfo = null;
+      DefaultConstraint dInfo = null;
+      CheckConstraint cInfo = null;
+      StorageHandlerInfo storageHandlerInfo = null;
+      if (desc.isExt() || desc.isFormatted()) {
+        pkInfo = context.getDb().getPrimaryKeys(tbl.getDbName(), tbl.getTableName());
+        fkInfo = context.getDb().getForeignKeys(tbl.getDbName(), tbl.getTableName());
+        ukInfo = context.getDb().getUniqueConstraints(tbl.getDbName(), tbl.getTableName());
+        nnInfo = context.getDb().getNotNullConstraints(tbl.getDbName(), tbl.getTableName());
+        dInfo = context.getDb().getDefaultConstraints(tbl.getDbName(), tbl.getTableName());
+        cInfo = context.getDb().getCheckConstraints(tbl.getDbName(), tbl.getTableName());
+        storageHandlerInfo = context.getDb().getStorageHandlerInfo(tbl);
+      }
+      fixDecimalColumnTypeName(cols);
+      // Information for materialized views
+      if (tbl.isMaterializedView()) {
+        final String validTxnsList = context.getDb().getConf().get(ValidTxnList.VALID_TXNS_KEY);
+        if (validTxnsList != null) {
+          List<String> tablesUsed = new ArrayList<>(tbl.getCreationMetadata().getTablesUsed());
+          ValidTxnWriteIdList currentTxnWriteIds =
+              SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList);
+          long defaultTimeWindow = HiveConf.getTimeVar(context.getDb().getConf(),
+              HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW, TimeUnit.MILLISECONDS);
+          tbl.setOutdatedForRewriting(Hive.isOutdatedMaterializedView(tbl,
+              currentTxnWriteIds, defaultTimeWindow, tablesUsed, false));
+        }
+      }
+      // In case the query is served by HiveServer2, don't pad it with spaces,
+      // as HiveServer2 output is consumed by JDBC/ODBC clients.
+      boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
+      context.getFormatter().describeTable(outStream, colPath, tableName, tbl, part,
+          cols, desc.isFormatted(), desc.isExt(), isOutputPadded,
+          colStats, pkInfo, fkInfo, ukInfo, nnInfo, dInfo, cInfo,
+          storageHandlerInfo);
+
+      LOG.debug("DDLTask: written data for {}", tableName);
+
+    } catch (SQLException e) {
+      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName);
+    } finally {
+      IOUtils.closeStream(outStream);
+    }
+
+    return 0;
+  }
+
+  /**
+   * Fix the type name of a column of type decimal w/o precision/scale specified. This makes
+   * the DESCRIBE TABLE output show "decimal(10,0)" instead of "decimal" even if the type stored
+   * in the metastore is "decimal", which is possible with previous versions of Hive.
+   *
+   * @param cols columns that are to be fixed as such
+   */
+  private static void fixDecimalColumnTypeName(List<FieldSchema> cols) {
+    for (FieldSchema col : cols) {
+      if (serdeConstants.DECIMAL_TYPE_NAME.equals(col.getType())) {
+        col.setType(DecimalTypeInfo.getQualifiedName(HiveDecimal.USER_DEFAULT_PRECISION,
+            HiveDecimal.USER_DEFAULT_SCALE));
+      }
+    }
+  }
+}
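
To make fixDecimalColumnTypeName concrete: legacy metadata may store a bare "decimal" type name, and the method rewrites it to the user default precision and scale, which per the javadoc above is decimal(10,0). A standalone sketch of the same normalization on plain name/type pairs (the column names are made up; the real code works on FieldSchema and DecimalTypeInfo):

    import java.util.ArrayList;
    import java.util.List;

    public class FixDecimalTypeNameSketch {
      public static void main(String[] args) {
        List<String[]> cols = new ArrayList<>();
        cols.add(new String[] {"amount", "decimal"}); // legacy metadata, no precision/scale
        cols.add(new String[] {"name", "string"});

        for (String[] col : cols) {
          if ("decimal".equals(col[1])) {
            // mirrors HiveDecimal.USER_DEFAULT_PRECISION / USER_DEFAULT_SCALE
            col[1] = "decimal(10,0)";
          }
        }
        cols.forEach(c -> System.out.println(c[0] + " " + c[1]));
      }
    }
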
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java
new file mode 100644
index 0000000..f910c57
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for DROP TABLE commands.
+ */
+@Explain(displayName = "Drop Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class DropTableDesc implements DDLDesc, Serializable {
+  private static final long serialVersionUID = 1L;
+
+  static {
+    DDLTask2.registerOperation(DropTableDesc.class, DropTableOperation.class);
+  }
+
+  private final String tableName;
+  private final TableType expectedType;
+  private final boolean ifExists;
+  private final boolean ifPurge;
+  private final ReplicationSpec replicationSpec;
+  private final boolean validationRequired;
+
+  public DropTableDesc(String tableName, TableType expectedType, boolean ifExists, boolean ifPurge,
+      ReplicationSpec replicationSpec) {
+    this(tableName, expectedType, ifExists, ifPurge, replicationSpec, true);
+  }
+
+  public DropTableDesc(String tableName, TableType expectedType, boolean ifExists, boolean ifPurge,
+      ReplicationSpec replicationSpec, boolean validationRequired) {
+    this.tableName = tableName;
+    this.expectedType = expectedType;
+    this.ifExists = ifExists;
+    this.ifPurge = ifPurge;
+    this.replicationSpec = replicationSpec == null ? new ReplicationSpec() : replicationSpec;
+    this.validationRequired = validationRequired;
+  }
+
+  @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getTableName() {
+    return tableName;
+  }
+
+  public boolean getExpectView() {
+    return expectedType != null && expectedType == TableType.VIRTUAL_VIEW;
+  }
+
+  public boolean getExpectMaterializedView() {
+    return expectedType != null && expectedType == TableType.MATERIALIZED_VIEW;
+  }
+
+  public boolean getIfExists() {
+    return ifExists;
+  }
+
+  public boolean getIfPurge() {
+    return ifPurge;
+  }
+
+  /**
+   * @return what kind of replication scope this drop is running under.
+   * This can result in a "DROP IF OLDER THAN" kind of semantic
+   */
+  public ReplicationSpec getReplicationSpec(){
+    return this.replicationSpec;
+  }
+
+  public boolean getValidationRequired(){
+    return this.validationRequired;
+  }
+}
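
A minimal sketch of the registration pattern used by the static initializer above: every DDLDesc registers the
DDLOperation subclass that executes it, so the dispatching task can later look the operation up by the desc's
class. The registry class below is hypothetical and only illustrates the idea; it is not the DDLTask2 code.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class DescOperationRegistrySketch {
  private static final Map<Class<?>, Class<?>> OPERATIONS = new ConcurrentHashMap<>();

  // Mirrors DDLTask2.registerOperation(DropTableDesc.class, DropTableOperation.class).
  static void register(Class<?> descClass, Class<?> operationClass) {
    OPERATIONS.put(descClass, operationClass);
  }

  // Mirrors the lookup the task performs when it receives a desc to execute.
  static Class<?> operationFor(Class<?> descClass) {
    return OPERATIONS.get(descClass);
  }
}
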
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java
new file mode 100644
index 0000000..d250772
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+
+import com.google.common.collect.Iterables;
+
+/**
+ * Operation process of dropping a table.
+ */
+public class DropTableOperation extends DDLOperation {
+  private final DropTableDesc desc;
+
+  public DropTableOperation(DDLOperationContext context, DropTableDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    Table tbl = null;
+    try {
+      tbl = context.getDb().getTable(desc.getTableName());
+    } catch (InvalidTableException e) {
+      // drop table is idempotent
+    }
+
+    // This is a true DROP TABLE
+    if (tbl != null && desc.getValidationRequired()) {
+      if (tbl.isView()) {
+        if (!desc.getExpectView()) {
+          if (desc.getIfExists()) {
+            return 0;
+          }
+          if (desc.getExpectMaterializedView()) {
+            throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW");
+          } else {
+            throw new HiveException("Cannot drop a view with DROP TABLE");
+          }
+        }
+      } else if (tbl.isMaterializedView()) {
+        if (!desc.getExpectMaterializedView()) {
+          if (desc.getIfExists()) {
+            return 0;
+          }
+          if (desc.getExpectView()) {
+            throw new HiveException("Cannot drop a materialized view with DROP VIEW");
+          } else {
+            throw new HiveException("Cannot drop a materialized view with DROP TABLE");
+          }
+        }
+      } else {
+        if (desc.getExpectView()) {
+          if (desc.getIfExists()) {
+            return 0;
+          }
+          throw new HiveException("Cannot drop a base table with DROP VIEW");
+        } else if (desc.getExpectMaterializedView()) {
+          if (desc.getIfExists()) {
+            return 0;
+          }
+          throw new HiveException("Cannot drop a base table with DROP MATERIALIZED VIEW");
+        }
+      }
+    }
+
+    ReplicationSpec replicationSpec = desc.getReplicationSpec();
+    if (tbl != null && replicationSpec.isInReplicationScope()) {
+      /**
+       * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely
+       * matches a DROP TABLE IF OLDER THAN(x) semantic.
+       *
+       * Ideally, commands executed under the scope of replication need to be idempotent and resilient
+       * to repeats. What can happen, sometimes, is that a worker processing a replication task is
+       * abandoned for not returning in time, but still executes its task after a while; this must not
+       * corrupt data that was written later on. So, for example, if we create partition P1, then drop
+       * it, then create it yet again, the replication of that drop should not drop the newer partition
+       * if it runs after the destination object is already in the newer state.
+       *
+       * Thus, we check replicationSpec.allowEventReplacementInto to determine whether or not we can
+       * drop the object in question (it returns false if the object is newer than the event, true
+       * otherwise).
+       *
+       * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP
+       * TABLE will always drop the table, and the included partitions, DROP TABLE FOR REPLICATION must
+       * do one more thing - if it does not drop the table because the table is in a newer state, it must
+       * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL
+       * acts like a recursive DROP TABLE IF OLDER.
+       */
+      if (!replicationSpec.allowEventReplacementInto(tbl.getParameters())) {
+        // Drop occurred as part of replicating a drop, but the destination
+        // table was newer than the event being replicated. Ignore, but drop
+        // any partitions inside that are older.
+        if (tbl.isPartitioned()) {
+          PartitionIterable partitions = new PartitionIterable(context.getDb(), tbl, null,
+              context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
+          for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())){
+            context.getDb().dropPartition(tbl.getDbName(), tbl.getTableName(), p.getValues(), true);
+          }
+        }
+        LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", desc.getTableName());
+        return 0; // table is newer, leave it be.
+      }
+    }
+
+    // drop the table
+    // TODO: API w/catalog name
+    context.getDb().dropTable(desc.getTableName(), desc.getIfPurge());
+    if (tbl != null) {
+      // Remove from cache if it is a materialized view
+      if (tbl.isMaterializedView()) {
+        HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl);
+      }
+      // We have already locked the table in DDLSemanticAnalyzer, don't do it again here
+      DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context);
+    }
+
+    return 0;
+  }
+}
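
A minimal, self-contained sketch of the Guava filtering used above when a replicated drop is skipped:
Iterables.filter() lazily yields only the elements accepted by the predicate, which is how only the partitions
that the ReplicationSpec still allows to be replaced get dropped. The numeric event ids are invented for
illustration and stand in for the per-partition replication state.

import java.util.Arrays;
import java.util.List;

import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;

public class DropOlderPartitionsSketch {
  public static void main(String[] args) {
    List<Long> partitionEventIds = Arrays.asList(5L, 12L, 20L);
    final long replicatedEventId = 15L;

    // Keep only partitions older than the event being replicated,
    // analogous to replicationSpec.allowEventReplacementInto() above.
    Iterable<Long> droppable = Iterables.filter(partitionEventIds, new Predicate<Long>() {
      @Override
      public boolean apply(Long id) {
        return id < replicatedEventId;
      }
    });

    for (Long id : droppable) {
      System.out.println("would drop partition with event id " + id); // prints 5 and 12
    }
  }
}
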
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java
similarity index 67%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java
index 723678e..2a8b02e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java
@@ -16,51 +16,46 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
 import java.util.Map;
 
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 
 /**
- * LockTableDesc.
- *
+ * DDL task description for LOCK TABLE commands.
  */
 @Explain(displayName = "Lock Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class LockTableDesc extends DDLDesc implements Serializable {
+public class LockTableDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
 
-  private String tableName;
-  private String mode;
-  private Map<String, String> partSpec;
-  private String queryId;
-  private String queryStr;
-
-  public LockTableDesc() {
+  static {
+    DDLTask2.registerOperation(LockTableDesc.class, LockTableOperation.class);
   }
 
-  public LockTableDesc(String tableName, String mode, Map<String, String> partSpec, String queryId) {
+  private final String tableName;
+  private final String mode;
+  private final Map<String, String> partSpec;
+  private final String queryId;
+  private final String queryStr;
+
+  public LockTableDesc(String tableName, String mode, Map<String, String> partSpec, String queryId, String queryStr) {
     this.tableName = tableName;
     this.mode      = mode;
     this.partSpec  = partSpec;
     this.queryId   = queryId;
+    this.queryStr  = queryStr;
   }
 
   public String getTableName() {
     return tableName;
   }
 
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  public void setMode(String mode) {
-    this.mode = mode;
-  }
-
   public String getMode() {
     return mode;
   }
@@ -69,23 +64,11 @@ public class LockTableDesc extends DDLDesc implements Serializable {
     return partSpec;
   }
 
-  public void setPartSpec(Map<String, String> partSpec) {
-    this.partSpec = partSpec;
-  }
-
   public String getQueryId() {
     return queryId;
   }
 
-  public void setQueryId(String queryId) {
-    this.queryId = queryId;
-  }
-
   public String getQueryStr() {
     return queryStr;
   }
-
-  public void setQueryStr(String queryStr) {
-    this.queryStr = queryStr;
-  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java
similarity index 51%
copy from ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java
copy to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java
index e349a0a..2044a81 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java
@@ -16,36 +16,29 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl;
+package org.apache.hadoop.hive.ql.ddl.table;
 
-import java.io.DataOutputStream;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * Abstract ancestor class of all DDL Operation classes.
+ * Operation process of locking a table.
  */
-public abstract class DDLOperation {
-  protected static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.DDLTask");
-
-  protected final DDLOperationContext context;
+public class LockTableOperation extends DDLOperation {
+  private final LockTableDesc desc;
 
-  public DDLOperation(DDLOperationContext context) {
-    this.context = context;
+  public LockTableOperation(DDLOperationContext context, LockTableDesc desc) {
+    super(context);
+    this.desc = desc;
   }
 
-  public abstract int execute() throws HiveException;
-
-  protected DataOutputStream getOutputStream(Path outputFile) throws HiveException {
-    try {
-      FileSystem fs = outputFile.getFileSystem(context.getConf());
-      return fs.create(outputFile);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
+  @Override
+  public int execute() throws HiveException {
+    Context ctx = context.getDriverContext().getCtx();
+    HiveTxnManager txnManager = ctx.getHiveTxnManager();
+    return txnManager.lockTable(context.getDb(), desc);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java
similarity index 67%
copy from ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java
copy to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java
index 2c8e1e1..4bb609e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,16 +16,25 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.parse;
+package org.apache.hadoop.hive.ql.ddl.table;
 
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.DDLDesc;
 import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for PRE INSERT commands.
+ */
+@Explain(displayName = "Pre-Insert task", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class PreInsertTableDesc implements DDLDesc {
+  static {
+    DDLTask2.registerOperation(PreInsertTableDesc.class, PreInsertTableOperation.class);
+  }
 
-@Explain(displayName = "Pre-Insert task", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED })
-public class PreInsertTableDesc extends DDLDesc {
-  private final boolean isOverwrite;
   private final Table table;
+  private final boolean isOverwrite;
 
   public PreInsertTableDesc(Table table, boolean overwrite) {
     this.table = table;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java
new file mode 100644
index 0000000..5d85d0a
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Operation process of pre-inserting into a table.
+ */
+public class PreInsertTableOperation extends DDLOperation {
+  private final PreInsertTableDesc desc;
+
+  public PreInsertTableOperation(DDLOperationContext context, PreInsertTableDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    try {
+      HiveMetaHook hook = desc.getTable().getStorageHandler().getMetaHook();
+      if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
+        return 0;
+      }
+
+      DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook;
+      hiveMetaHook.preInsertTable(desc.getTable().getTTable(), desc.isOverwrite());
+    } catch (MetaException e) {
+      throw new HiveException(e);
+    }
+
+    return 0;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java
similarity index 58%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java
index f96c529..8fa1ef1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java
@@ -16,84 +16,46 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 /**
- * ShowCreateTableDesc.
- *
+ * DDL task description for SHOW CREATE TABLE commands.
  */
 @Explain(displayName = "Show Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class ShowCreateTableDesc extends DDLDesc implements Serializable {
+public class ShowCreateTableDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
-  String resFile;
-  String tableName;
-
-  /**
-   * table name for the result of showcreatetable.
-   */
-  private static final String table = "show_create_table";
-  /**
-   * thrift ddl for the result of showcreatetable.
-   */
-  private static final String schema = "createtab_stmt#string";
-
-  public String getTable() {
-    return table;
-  }
 
-  public String getSchema() {
-    return schema;
+  static {
+    DDLTask2.registerOperation(ShowCreateTableDesc.class, ShowCreateTableOperation.class);
   }
 
   /**
-   * For serialization use only.
+   * Thrift ddl for the result of showcreatetable.
    */
-  public ShowCreateTableDesc() {
-  }
+  public static final String SCHEMA = "createtab_stmt#string";
+
+  private final String resFile;
+  private final String tableName;
 
-  /**
-   * @param resFile
-   * @param tableName
-   *          name of table to show
-   */
   public ShowCreateTableDesc(String tableName, String resFile) {
     this.tableName = tableName;
     this.resFile = resFile;
   }
 
-  /**
-   * @return the resFile
-   */
   @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
   public String getResFile() {
     return resFile;
   }
 
-  /**
-   * @param resFile
-   *          the resFile to set
-   */
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
-  }
-
-  /**
-   * @return the tableName
-   */
   @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getTableName() {
     return tableName;
   }
-
-  /**
-   * @param tableName
-   *          the tableName to set
-   */
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java
new file mode 100644
index 0000000..932d942
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hive.common.util.HiveStringUtils;
+import org.stringtemplate.v4.ST;
+
+/**
+ * Operation process showing the CREATE TABLE statement of a table.
+ */
+public class ShowCreateTableOperation extends DDLOperation {
+  private static final String EXTERNAL = "external";
+  private static final String TEMPORARY = "temporary";
+  private static final String LIST_COLUMNS = "columns";
+  private static final String TBL_COMMENT = "tbl_comment";
+  private static final String LIST_PARTITIONS = "partitions";
+  private static final String SORT_BUCKET = "sort_bucket";
+  private static final String SKEWED_INFO = "tbl_skewedinfo";
+  private static final String ROW_FORMAT = "row_format";
+  private static final String TBL_LOCATION = "tbl_location";
+  private static final String TBL_PROPERTIES = "tbl_properties";
+
+  private final ShowCreateTableDesc desc;
+
+  public ShowCreateTableOperation(DDLOperationContext context, ShowCreateTableDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    // get the create table statement for the table and populate the output
+    try (DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) {
+      return showCreateTable(outStream);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
+  private int showCreateTable(DataOutputStream outStream) throws HiveException {
+    boolean needsLocation = true;
+    StringBuilder createTabCommand = new StringBuilder();
+
+    Table tbl = context.getDb().getTable(desc.getTableName(), false);
+    List<String> duplicateProps = new ArrayList<String>();
+    try {
+      needsLocation = CreateTableOperation.doesTableNeedLocation(tbl);
+
+      if (tbl.isView()) {
+        String createTabStmt = "CREATE VIEW `" + desc.getTableName() + "` AS " + tbl.getViewExpandedText();
+        outStream.write(createTabStmt.getBytes(StandardCharsets.UTF_8));
+        return 0;
+      }
+
+      createTabCommand.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `");
+      createTabCommand.append(desc.getTableName() + "`(\n");
+      createTabCommand.append("<" + LIST_COLUMNS + ">)\n");
+      createTabCommand.append("<" + TBL_COMMENT + ">\n");
+      createTabCommand.append("<" + LIST_PARTITIONS + ">\n");
+      createTabCommand.append("<" + SORT_BUCKET + ">\n");
+      createTabCommand.append("<" + SKEWED_INFO + ">\n");
+      createTabCommand.append("<" + ROW_FORMAT + ">\n");
+      if (needsLocation) {
+        createTabCommand.append("LOCATION\n");
+        createTabCommand.append("<" + TBL_LOCATION + ">\n");
+      }
+      createTabCommand.append("TBLPROPERTIES (\n");
+      createTabCommand.append("<" + TBL_PROPERTIES + ">)\n");
+      ST createTabStmt = new ST(createTabCommand.toString());
+
+      // For cases where the table is temporary
+      String tblTemp = "";
+      if (tbl.isTemporary()) {
+        duplicateProps.add("TEMPORARY");
+        tblTemp = "TEMPORARY ";
+      }
+      // For cases where the table is external
+      String tblExternal = "";
+      if (tbl.getTableType() == TableType.EXTERNAL_TABLE) {
+        duplicateProps.add("EXTERNAL");
+        tblExternal = "EXTERNAL ";
+      }
+
+      // Columns
+      String tblColumns = "";
+      List<FieldSchema> cols = tbl.getCols();
+      List<String> columns = new ArrayList<String>();
+      for (FieldSchema col : cols) {
+        String columnDesc = "  `" + col.getName() + "` " + col.getType();
+        if (col.getComment() != null) {
+          columnDesc = columnDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'";
+        }
+        columns.add(columnDesc);
+      }
+      tblColumns = StringUtils.join(columns, ", \n");
+
+      // Table comment
+      String tblComment = "";
+      String tabComment = tbl.getProperty("comment");
+      if (tabComment != null) {
+        duplicateProps.add("comment");
+        tblComment = "COMMENT '" + HiveStringUtils.escapeHiveCommand(tabComment) + "'";
+      }
+
+      // Partitions
+      String tblPartitions = "";
+      List<FieldSchema> partKeys = tbl.getPartitionKeys();
+      if (partKeys.size() > 0) {
+        tblPartitions += "PARTITIONED BY ( \n";
+        List<String> partCols = new ArrayList<String>();
+        for (FieldSchema partKey : partKeys) {
+          String partColDesc = "  `" + partKey.getName() + "` " + partKey.getType();
+          if (partKey.getComment() != null) {
+            partColDesc = partColDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(partKey.getComment()) + "'";
+          }
+          partCols.add(partColDesc);
+        }
+        tblPartitions += StringUtils.join(partCols, ", \n");
+        tblPartitions += ")";
+      }
+
+      // Clusters (Buckets)
+      String tblSortBucket = "";
+      List<String> buckCols = tbl.getBucketCols();
+      if (buckCols.size() > 0) {
+        duplicateProps.add("SORTBUCKETCOLSPREFIX");
+        tblSortBucket += "CLUSTERED BY ( \n  ";
+        tblSortBucket += StringUtils.join(buckCols, ", \n  ");
+        tblSortBucket += ") \n";
+        List<Order> sortCols = tbl.getSortCols();
+        if (sortCols.size() > 0) {
+          tblSortBucket += "SORTED BY ( \n";
+          // Order
+          List<String> sortKeys = new ArrayList<String>();
+          for (Order sortCol : sortCols) {
+            String sortKeyDesc = "  " + sortCol.getCol() + " ";
+            if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
+              sortKeyDesc = sortKeyDesc + "ASC";
+            } else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
+              sortKeyDesc = sortKeyDesc + "DESC";
+            }
+            sortKeys.add(sortKeyDesc);
+          }
+          tblSortBucket += StringUtils.join(sortKeys, ", \n");
+          tblSortBucket += ") \n";
+        }
+        tblSortBucket += "INTO " + tbl.getNumBuckets() + " BUCKETS";
+      }
+
+      // Skewed Info
+      StringBuilder tblSkewedInfo = new StringBuilder();
+      SkewedInfo skewedInfo = tbl.getSkewedInfo();
+      if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) {
+        tblSkewedInfo.append("SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n");
+        tblSkewedInfo.append("  ON (");
+        List<String> colValueList = new ArrayList<String>();
+        for (List<String> colValues : skewedInfo.getSkewedColValues()) {
+          colValueList.add("('" + StringUtils.join(colValues, "','") + "')");
+        }
+        tblSkewedInfo.append(StringUtils.join(colValueList, ",") + ")");
+        if (tbl.isStoredAsSubDirectories()) {
+          tblSkewedInfo.append("\n  STORED AS DIRECTORIES");
+        }
+      }
+
+      // Row format (SerDe)
+      StringBuilder tblRowFormat = new StringBuilder();
+      StorageDescriptor sd = tbl.getTTable().getSd();
+      SerDeInfo serdeInfo = sd.getSerdeInfo();
+      Map<String, String> serdeParams = serdeInfo.getParameters();
+      tblRowFormat.append("ROW FORMAT SERDE \n");
+      tblRowFormat.append("  '" + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n");
+      if (tbl.getStorageHandler() == null) {
+        // If the serialization.format property has the default value, it will not be included in
+        // the SERDE properties
+        if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(serdeConstants.SERIALIZATION_FORMAT))) {
+          serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT);
+        }
+        if (!serdeParams.isEmpty()) {
+          appendSerdeParams(tblRowFormat, serdeParams).append(" \n");
+        }
+        tblRowFormat.append("STORED AS INPUTFORMAT \n  '"
+            + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n");
+        tblRowFormat.append("OUTPUTFORMAT \n  '" + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'");
+      } else {
+        duplicateProps.add(META_TABLE_STORAGE);
+        tblRowFormat.append("STORED BY \n  '" +
+            HiveStringUtils.escapeHiveCommand(tbl.getParameters().get(META_TABLE_STORAGE)) + "' \n");
+        // SerDe Properties
+        if (!serdeParams.isEmpty()) {
+          appendSerdeParams(tblRowFormat, serdeInfo.getParameters());
+        }
+      }
+      String tblLocation = "  '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'";
+
+      // Table properties
+      duplicateProps.addAll(StatsSetupConst.TABLE_PARAMS_STATS_KEYS);
+      String tblProperties = DDLUtils.propertiesToString(tbl.getParameters(), duplicateProps);
+
+      createTabStmt.add(TEMPORARY, tblTemp);
+      createTabStmt.add(EXTERNAL, tblExternal);
+      createTabStmt.add(LIST_COLUMNS, tblColumns);
+      createTabStmt.add(TBL_COMMENT, tblComment);
+      createTabStmt.add(LIST_PARTITIONS, tblPartitions);
+      createTabStmt.add(SORT_BUCKET, tblSortBucket);
+      createTabStmt.add(SKEWED_INFO, tblSkewedInfo);
+      createTabStmt.add(ROW_FORMAT, tblRowFormat);
+      // Table location should not be printed with hbase backed tables
+      if (needsLocation) {
+        createTabStmt.add(TBL_LOCATION, tblLocation);
+      }
+      createTabStmt.add(TBL_PROPERTIES, tblProperties);
+
+      outStream.write(createTabStmt.render().getBytes(StandardCharsets.UTF_8));
+    } catch (IOException e) {
+      LOG.info("show create table: ", e);
+      return 1;
+    }
+
+    return 0;
+  }
+
+  public static StringBuilder appendSerdeParams(StringBuilder builder, Map<String, String> serdeParam) {
+    serdeParam = new TreeMap<String, String>(serdeParam);
+    builder.append("WITH SERDEPROPERTIES ( \n");
+    List<String> serdeCols = new ArrayList<String>();
+    for (Entry<String, String> entry : serdeParam.entrySet()) {
+      serdeCols.add("  '" + entry.getKey() + "'='" + HiveStringUtils.escapeHiveCommand(entry.getValue()) + "'");
+    }
+    builder.append(StringUtils.join(serdeCols, ", \n")).append(')');
+    return builder;
+  }
+}
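
A minimal sketch of the StringTemplate 4 usage that assembles the CREATE TABLE text above: placeholders sit
between the default '<' and '>' delimiters, are filled with add(), and are resolved by render(). The table and
column names below are invented for illustration.

import org.stringtemplate.v4.ST;

public class ShowCreateTableTemplateSketch {
  public static void main(String[] args) {
    ST stmt = new ST("CREATE <external>TABLE `<name>`(\n<columns>)");
    stmt.add("external", "EXTERNAL ");
    stmt.add("name", "t1");
    stmt.add("columns", "  `id` int");

    // Prints:
    // CREATE EXTERNAL TABLE `t1`(
    //   `id` int)
    System.out.println(stmt.render());
  }
}
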
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java
similarity index 51%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java
index aac0cf2..72caa58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java
@@ -16,65 +16,41 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
-import java.util.HashMap;
 
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
-
 /**
- * ShowTblPropertiesDesc.
- *
+ * DDL task description for SHOW TABLE PROPERTIES commands.
  */
 @Explain(displayName = "Show Table Properties", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class ShowTblPropertiesDesc extends DDLDesc implements Serializable {
+public class ShowTablePropertiesDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
-  String resFile;
-  String tableName;
-  String propertyName;
-
-  /**
-   * table name for the result of showtblproperties.
-   */
-  private static final String table = "show_tableproperties";
-  /**
-   * thrift ddl for the result of showtblproperties.
-   */
-  private static final String schema = "prpt_name,prpt_value#string:string";
-
-  public String getTable() {
-    return table;
-  }
 
-  public String getSchema() {
-    return schema;
+  static {
+    DDLTask2.registerOperation(ShowTablePropertiesDesc.class, ShowTablePropertiesOperation.class);
   }
 
   /**
-   * For serialization use only.
+   * Thrift ddl for the result of showtblproperties.
    */
-  public ShowTblPropertiesDesc() {
-  }
+  public static final String SCHEMA = "prpt_name,prpt_value#string:string";
 
-  /**
-   * @param resFile
-   * @param tableName
-   *          name of table to show
-   * @param propertyName
-   *          name of property to show
-   */
-  public ShowTblPropertiesDesc(String resFile, String tableName, String propertyName) {
+  private final String resFile;
+  private final String tableName;
+  private final String propertyName;
+
+  public ShowTablePropertiesDesc(String resFile, String tableName, String propertyName) {
     this.resFile = resFile;
     this.tableName = tableName;
     this.propertyName = propertyName;
   }
 
-  /**
-   * @return the resFile
-   */
   public String getResFile() {
     return resFile;
   }
@@ -84,43 +60,13 @@ public class ShowTblPropertiesDesc extends DDLDesc implements Serializable {
     return getResFile();
   }
 
-  /**
-   * @param resFile
-   *          the resFile to set
-   */
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
-  }
-
-  /**
-   * @return the tableName
-   */
   @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getTableName() {
     return tableName;
   }
 
-  /**
-   * @param tableName
-   *          the tableName to set
-   */
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  /**
-   * @return the propertyName
-   */
   @Explain(displayName = "property name")
   public String getPropertyName() {
     return propertyName;
   }
-
-  /**
-   * @param propertyName
-   *          the propertyName to set
-   */
-  public void setPropertyName(String propertyName) {
-    this.propertyName = propertyName;
-  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java
new file mode 100644
index 0000000..385052d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+
+/**
+ * Operation process showing the table properties.
+ */
+public class ShowTablePropertiesOperation extends DDLOperation {
+  private final ShowTablePropertiesDesc desc;
+
+  public ShowTablePropertiesOperation(DDLOperationContext context, ShowTablePropertiesDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    String tableName = desc.getTableName();
+
+    // show table properties - populate the output stream
+    Table tbl = context.getDb().getTable(tableName, false);
+    try {
+      if (tbl == null) {
+        String errMsg = "Table " + tableName + " does not exist";
+        DDLUtils.writeToFile(errMsg, desc.getResFile(), context);
+        return 0;
+      }
+
+      LOG.info("DDLTask: show properties for {}", tableName);
+
+      StringBuilder builder = new StringBuilder();
+      String propertyName = desc.getPropertyName();
+      if (propertyName != null) {
+        String propertyValue = tbl.getProperty(propertyName);
+        if (propertyValue == null) {
+          String errMsg = "Table " + tableName + " does not have property: " + propertyName;
+          builder.append(errMsg);
+        } else {
+          DDLUtils.appendNonNull(builder, propertyName, true);
+          DDLUtils.appendNonNull(builder, propertyValue);
+        }
+      } else {
+        Map<String, String> properties = new TreeMap<String, String>(tbl.getParameters());
+        for (Entry<String, String> entry : properties.entrySet()) {
+          DDLUtils.appendNonNull(builder, entry.getKey(), true);
+          DDLUtils.appendNonNull(builder, entry.getValue());
+        }
+      }
+
+      LOG.info("DDLTask: written data for showing properties of {}", tableName);
+      DDLUtils.writeToFile(builder.toString(), desc.getResFile(), context);
+    } catch (IOException e) {
+      LOG.info("show table properties: ", e);
+      return 1;
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+
+    return 0;
+  }
+}
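
A minimal sketch of the sorted key/value listing produced above: copying the table parameters into a TreeMap
gives deterministic, alphabetically ordered rows. The property names, values, and the tab/newline separators
below are assumptions for illustration; the real code delegates formatting to DDLUtils.appendNonNull().

import java.util.Map;
import java.util.TreeMap;

public class SortedTablePropertiesSketch {
  public static void main(String[] args) {
    Map<String, String> params = new TreeMap<>();
    params.put("transactional", "true");
    params.put("comment", "demo table");
    params.put("numRows", "42");

    StringBuilder builder = new StringBuilder();
    for (Map.Entry<String, String> entry : params.entrySet()) {
      // Separators are illustrative only.
      builder.append(entry.getKey()).append('\t').append(entry.getValue()).append('\n');
    }
    System.out.print(builder); // comment, numRows, transactional - in sorted key order
  }
}
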
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java
similarity index 50%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java
index 5022e28..8c312a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java
@@ -16,96 +16,53 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
-import java.util.HashMap;
+import java.util.Map;
 
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
-
 /**
- * ShowTableStatusDesc.
- *
+ * DDL task description for SHOW TABLE STATUS commands.
  */
 @Explain(displayName = "Show Table Status", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class ShowTableStatusDesc extends DDLDesc implements Serializable {
+public class ShowTableStatusDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
-  String pattern;
-  String resFile;
-  String dbName;
-  HashMap<String, String> partSpec;
-
-  /**
-   * table name for the result of show tables.
-   */
-  private static final String table = "show_tablestatus";
-  /**
-   * thrift ddl for the result of show tables.
-   */
-  private static final String schema = "tab_name#string";
-
-  public String getTable() {
-    return table;
-  }
 
-  public String getSchema() {
-    return schema;
+  static {
+    DDLTask2.registerOperation(ShowTableStatusDesc.class, ShowTableStatusOperation.class);
   }
 
   /**
-   * For serializatino use only.
+   * Thrift ddl for the result of show tables.
    */
-  public ShowTableStatusDesc() {
-  }
+  public static final String SCHEMA = "tab_name#string";
+
+  private final String resFile;
+  private final String dbName;
+  private final String pattern;
+  private final Map<String, String> partSpec;
 
-  /**
-   * @param pattern
-   *          names of tables to show
-   */
   public ShowTableStatusDesc(String resFile, String dbName, String pattern) {
-    this.dbName = dbName;
-    this.resFile = resFile;
-    this.pattern = pattern;
+    this(resFile, dbName, pattern, null);
   }
 
-  /**
-   * @param resFile
-   * @param dbName
-   *          data base name
-   * @param pattern
-   *          names of tables to show
-   * @param partSpec
-   *          partition specification
-   */
-  public ShowTableStatusDesc(String resFile, String dbName, String pattern,
-      HashMap<String, String> partSpec) {
-    this.dbName = dbName;
+  public ShowTableStatusDesc(String resFile, String dbName, String pattern, Map<String, String> partSpec) {
     this.resFile = resFile;
+    this.dbName = dbName;
     this.pattern = pattern;
     this.partSpec = partSpec;
   }
 
-  /**
-   * @return the pattern
-   */
   @Explain(displayName = "pattern")
   public String getPattern() {
     return pattern;
   }
 
-  /**
-   * @param pattern
-   *          the pattern to set
-   */
-  public void setPattern(String pattern) {
-    this.pattern = pattern;
-  }
-
-  /**
-   * @return the resFile
-   */
   public String getResFile() {
     return resFile;
   }
@@ -115,43 +72,13 @@ public class ShowTableStatusDesc extends DDLDesc implements Serializable {
     return getResFile();
   }
 
-  /**
-   * @param resFile
-   *          the resFile to set
-   */
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
-  }
-
-  /**
-   * @return the database name
-   */
   @Explain(displayName = "database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getDbName() {
     return dbName;
   }
 
-  /**
-   * @param dbName
-   *          the database name
-   */
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  /**
-   * @return the partSpec
-   */
   @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public HashMap<String, String> getPartSpec() {
+  public Map<String, String> getPartSpec() {
     return partSpec;
   }
-
-  /**
-   * @param partSpec
-   *          the partSpec to set
-   */
-  public void setPartSpec(HashMap<String, String> partSpec) {
-    this.partSpec = partSpec;
-  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java
new file mode 100644
index 0000000..ea695fd
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+
+import java.io.DataOutputStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * Operation process showing the table status.
+ */
+public class ShowTableStatusOperation extends DDLOperation {
+  private final ShowTableStatusDesc desc;
+
+  public ShowTableStatusOperation(DDLOperationContext context, ShowTableStatusDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    // get the tables for the desired pattern - populate the output stream
+    List<Table> tbls = new ArrayList<Table>();
+    Map<String, String> part = desc.getPartSpec();
+    Partition par = null;
+    if (part != null) {
+      Table tbl = context.getDb().getTable(desc.getDbName(), desc.getPattern());
+      par = context.getDb().getPartition(tbl, part, false);
+      if (par == null) {
+        throw new HiveException("Partition " + part + " for table " + desc.getPattern() + " does not exist.");
+      }
+      tbls.add(tbl);
+    } else {
+      LOG.debug("pattern: {}", desc.getPattern());
+      List<String> tblStr = context.getDb().getTablesForDb(desc.getDbName(), desc.getPattern());
+      SortedSet<String> sortedTbls = new TreeSet<String>(tblStr);
+      Iterator<String> iterTbls = sortedTbls.iterator();
+      while (iterTbls.hasNext()) {
+        // create a row per table name
+        String tblName = iterTbls.next();
+        Table tbl = context.getDb().getTable(desc.getDbName(), tblName);
+        tbls.add(tbl);
+      }
+      LOG.info("Found {} table(s) matching the SHOW TABLE EXTENDED statement.", tblStr.size());
+    }
+
+    // write the results in the file
+    DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context);
+    try {
+      context.getFormatter().showTableStatus(outStream, context.getDb(), context.getConf(), tbls, part, par);
+    } catch (Exception e) {
+      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show table status");
+    } finally {
+      IOUtils.closeStream(outStream);
+    }
+    return 0;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java
similarity index 59%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java
index 0f7a3cd..584433b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java
@@ -16,27 +16,27 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
-
 /**
- * ShowTablesDesc.
- *
+ * DDL task description for SHOW TABLES commands.
  */
 @Explain(displayName = "Show Tables", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class ShowTablesDesc extends DDLDesc implements Serializable {
+public class ShowTablesDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
 
-  /**
-   * table name for the result of show tables.
-   */
-  private static final String table = "show";
+  static {
+    DDLTask2.registerOperation(ShowTablesDesc.class, ShowTablesOperation.class);
+  }
 
   /**
    * thrift ddl for the result of show tables and show views.
@@ -54,161 +54,79 @@ public class ShowTablesDesc extends DDLDesc implements Serializable {
   private static final String MATERIALIZED_VIEWS_SCHEMA =
       "mv_name,rewrite_enabled,mode#string:string:string";
 
+  private final String resFile;
+  private final String dbName;
+  private final String pattern;
+  private final TableType type;
+  private final TableType typeFilter;
+  private final boolean isExtended;
 
-  TableType type;
-  String pattern;
-  TableType typeFilter;
-  String dbName;
-  String resFile;
-  boolean isExtended;
-
-  public String getTable() {
-    return table;
+  public ShowTablesDesc(Path resFile) {
+    this(resFile, null, null, null, null, false);
   }
 
-  public String getSchema() {
-    if (type != null && type == TableType.MATERIALIZED_VIEW) {
-      return MATERIALIZED_VIEWS_SCHEMA;
-    }
-    return isExtended ? EXTENDED_TABLES_SCHEMA : TABLES_VIEWS_SCHEMA;
+  public ShowTablesDesc(Path resFile, String dbName) {
+    this(resFile, dbName, null, null, null, false);
   }
 
-  public ShowTablesDesc() {
+  public ShowTablesDesc(Path resFile, String dbName, TableType type) {
+    this(resFile, dbName, null, type, null, false);
   }
 
-  /**
-   * @param resFile
-   */
-  public ShowTablesDesc(Path resFile) {
-    this.resFile = resFile.toString();
-    pattern = null;
+  public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType typeFilter, boolean isExtended) {
+    this(resFile, dbName, pattern, null, typeFilter, isExtended);
   }
 
-  /**
-   * @param dbName
-   *          name of database to show tables of
-   */
-  public ShowTablesDesc(Path resFile, String dbName) {
-    this.resFile = resFile.toString();
-    this.dbName = dbName;
+  public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type) {
+    this(resFile, dbName, pattern, type, null, false);
   }
 
-  /**
-   * @param pattern
-   *          names of tables to show
-   */
-  public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType typeFilter, boolean isExtended) {
+
+  public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type, TableType typeFilter,
+      boolean isExtended) {
     this.resFile = resFile.toString();
     this.dbName = dbName;
     this.pattern = pattern;
+    this.type = type;
     this.typeFilter = typeFilter;
     this.isExtended = isExtended;
   }
 
-  /**
-   * @param type
-   *          type of the tables to show
-   */
-  public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type) {
-    this.resFile = resFile.toString();
-    this.dbName = dbName;
-    this.pattern = pattern;
-    this.type    = type;
-  }
-
-  /**
-   * @return the pattern
-   */
   @Explain(displayName = "pattern")
   public String getPattern() {
     return pattern;
   }
 
-  /**
-   * @param pattern
-   *          the pattern to set
-   */
-  public void setPattern(String pattern) {
-    this.pattern = pattern;
-  }
-
-  /**
-   * @return the table type to be fetched
-   */
   @Explain(displayName = "type")
   public TableType getType() {
     return type;
   }
 
-  /**
-   * @param type
-   *          the table type to set
-   */
-  public void setType(TableType type) {
-    this.type = type;
-  }
-
-  /**
-   * @return the resFile
-   */
   @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
   public String getResFile() {
     return resFile;
   }
 
-  /**
-   * @param resFile
-   *          the resFile to set
-   */
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
-  }
-
-  /**
-   * @return the dbName
-   */
   @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getDbName() {
     return dbName;
   }
 
-  /**
-   * @param dbName
-   *          the dbName to set
-   */
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  /**
-   * @return is extended
-   */
-  @Explain(displayName = "extended", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, displayOnlyOnTrue = true)
+  @Explain(displayName = "extended", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
+      displayOnlyOnTrue = true)
   public boolean isExtended() {
     return isExtended;
   }
 
-  /**
-   * @param isExtended
-   *          whether extended modifier is enabled
-   */
-  public void setIsExtended(boolean isExtended) {
-    this.isExtended = isExtended;
-  }
-
-  /**
-   * @return table type filter, null if it is not filtered
-   */
   @Explain(displayName = "table type filter", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public TableType getTypeFilter() {
     return typeFilter;
   }
 
-  /**
-   * @param typeFilter
-   *          table type filter for show statement
-   */
-  public void setTypeFilter(TableType typeFilter) {
-    this.typeFilter = typeFilter;
+  public String getSchema() {
+    if (type != null && type == TableType.MATERIALIZED_VIEW) {
+      return MATERIALIZED_VIEWS_SCHEMA;
+    }
+    return isExtended ? EXTENDED_TABLES_SCHEMA : TABLES_VIEWS_SCHEMA;
   }
 }
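
A minimal sketch of the convention behind the SCHEMA strings above: column names come before the '#', column
types after it, separated by ',' and ':' respectively. The parsing below is for illustration only and is not
how Hive itself consumes these strings.

public class SchemaStringSketch {
  public static void main(String[] args) {
    String schema = "mv_name,rewrite_enabled,mode#string:string:string";
    String[] parts = schema.split("#");
    String[] names = parts[0].split(",");
    String[] types = parts[1].split(":");
    for (int i = 0; i < names.length; i++) {
      System.out.println(names[i] + " : " + types[i]);
    }
  }
}
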
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java
new file mode 100644
index 0000000..71b5717
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+
+import java.io.DataOutputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * Operation process of showing the tables.
+ */
+public class ShowTablesOperation extends DDLOperation {
+  private final ShowTablesDesc desc;
+
+  public ShowTablesOperation(DDLOperationContext context, ShowTablesDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    TableType type       = desc.getType(); // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs
+    String dbName        = desc.getDbName();
+    String pattern       = desc.getPattern(); // if null, all tables/views are returned
+    TableType typeFilter = desc.getTypeFilter();
+    String resultsFile   = desc.getResFile();
+    boolean isExtended   = desc.isExtended();
+
+    if (!context.getDb().databaseExists(dbName)) {
+      throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName);
+    }
+
+    LOG.debug("pattern: {}", pattern);
+    LOG.debug("typeFilter: {}", typeFilter);
+
+    List<String> tableNames  = null;
+    List<Table> tableObjects = null;
+    if (type == null) {
+      if (isExtended) {
+        tableObjects = new ArrayList<>();
+        tableObjects.addAll(context.getDb().getTableObjectsByType(dbName, pattern, typeFilter));
+        LOG.debug("Found {} table(s) matching the SHOW EXTENDED TABLES statement.", tableObjects.size());
+      } else {
+        tableNames = context.getDb().getTablesByType(dbName, pattern, typeFilter);
+        LOG.debug("Found {} table(s) matching the SHOW TABLES statement.", tableNames.size());
+      }
+    } else if (type == TableType.MATERIALIZED_VIEW) {
+      tableObjects = new ArrayList<>();
+      tableObjects.addAll(context.getDb().getMaterializedViewObjectsByPattern(dbName, pattern));
+      LOG.debug("Found {} materialized view(s) matching the SHOW MATERIALIZED VIEWS statement.", tableObjects.size());
+    } else if (type == TableType.VIRTUAL_VIEW) {
+      tableNames = context.getDb().getTablesByType(dbName, pattern, type);
+      LOG.debug("Found {} view(s) matching the SHOW VIEWS statement.", tableNames.size());
+    } else {
+      throw new HiveException("Option not recognized in SHOW TABLES/VIEWS/MATERIALIZED VIEWS");
+    }
+
+    // write the results in the file
+    DataOutputStream outStream = null;
+    try {
+      Path resFile = new Path(resultsFile);
+      FileSystem fs = resFile.getFileSystem(context.getConf());
+      outStream = fs.create(resFile);
+      // Sort by name and print
+      if (tableNames != null) {
+        SortedSet<String> sortedSet = new TreeSet<String>(tableNames);
+        context.getFormatter().showTables(outStream, sortedSet);
+      } else {
+        Collections.sort(tableObjects, Comparator.comparing(Table::getTableName));
+        if (isExtended) {
+          context.getFormatter().showTablesExtended(outStream, tableObjects);
+        } else {
+          context.getFormatter().showMaterializedViews(outStream, tableObjects);
+        }
+      }
+      outStream.close();
+    } catch (Exception e) {
+      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database " + dbName);
+    } finally {
+      IOUtils.closeStream(outStream);
+    }
+    return 0;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java
similarity index 75%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java
index 61deb24..1f0cd82 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java
@@ -16,56 +16,59 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
+import java.io.Serializable;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
+import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 
 /**
- * Truncates managed table or partition
+ * DDL task description for TRUNCATE TABLE commands.
  */
 @Explain(displayName = "Truncate Table or Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class TruncateTableDesc extends DDLDesc implements DDLDesc.DDLDescWithWriteId {
-  private final static Logger LOG = LoggerFactory.getLogger(TruncateTableDesc.class);
-
+public class TruncateTableDesc implements DDLDesc, Serializable, DDLDescWithWriteId {
   private static final long serialVersionUID = 1L;
 
-  private String tableName;
-  private String fullTableName;
-  private Map<String, String> partSpec;
+  static {
+    DDLTask2.registerOperation(TruncateTableDesc.class, TruncateTableOperation.class);
+  }
+
+  private final String tableName;
+  private final String fullTableName;
+  private final Map<String, String> partSpec;
+  private final ReplicationSpec replicationSpec;
+  private final boolean isTransactional;
+
   private List<Integer> columnIndexes;
   private Path inputDir;
   private Path outputDir;
   private ListBucketingCtx lbCtx;
-  private ReplicationSpec replicationSpec;
-  private long writeId = 0;
-  private boolean isTransactional;
-
-  public TruncateTableDesc() {
-  }
 
+  private long writeId = 0;
 
   public TruncateTableDesc(String tableName, Map<String, String> partSpec, ReplicationSpec replicationSpec) {
     this(tableName, partSpec, replicationSpec, null);
   }
 
-  public TruncateTableDesc(String tableName, Map<String, String> partSpec,
-      ReplicationSpec replicationSpec, Table table) {
+  public TruncateTableDesc(String tableName, Map<String, String> partSpec, ReplicationSpec replicationSpec,
+      Table table) {
     this.tableName = tableName;
+    this.fullTableName = table == null ? tableName : TableName.getDbTable(table.getDbName(), table.getTableName());
     this.partSpec = partSpec;
     this.replicationSpec = replicationSpec;
     this.isTransactional = AcidUtils.isTransactionalTable(table);
-    this.fullTableName = table == null ? tableName : Warehouse.getQualifiedName(table.getTTable());
   }
 
   @Explain(displayName = "TableName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -73,8 +76,9 @@ public class TruncateTableDesc extends DDLDesc implements DDLDesc.DDLDescWithWri
     return tableName;
   }
 
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
+  @Override
+  public String getFullTableName() {
+    return fullTableName;
   }
 
   @Explain(displayName = "Partition Spec", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -82,8 +86,12 @@ public class TruncateTableDesc extends DDLDesc implements DDLDesc.DDLDescWithWri
     return partSpec;
   }
 
-  public void setPartSpec(Map<String, String> partSpec) {
-    this.partSpec = partSpec;
+  /**
+   * @return what kind of replication scope this truncate is running under.
+   * This can result in a "TRUNCATE IF NEWER THAN" kind of semantics.
+   */
+  public ReplicationSpec getReplicationSpec() {
+    return replicationSpec;
   }
 
   @Explain(displayName = "Column Indexes")
@@ -119,24 +127,12 @@ public class TruncateTableDesc extends DDLDesc implements DDLDesc.DDLDescWithWri
     this.lbCtx = lbCtx;
   }
 
-  /**
-   * @return what kind of replication scope this truncate is running under.
-   * This can result in a "TRUNCATE IF NEWER THAN" kind of semantic
-   */
-  public ReplicationSpec getReplicationSpec() { return this.replicationSpec; }
-
   @Override
   public void setWriteId(long writeId) {
     this.writeId = writeId;
   }
 
   @Override
-  public String getFullTableName() {
-    return fullTableName;
-  }
-
-
-  @Override
   public boolean mayNeedWriteId() {
     return isTransactional;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java
new file mode 100644
index 0000000..9778bfa
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask;
+import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+
+/**
+ * Operation process of truncating a table.
+ */
+public class TruncateTableOperation extends DDLOperation {
+  private final TruncateTableDesc desc;
+
+  public TruncateTableOperation(DDLOperationContext context, TruncateTableDesc desc) {
+    super(context);
+    this.desc = desc;
+  }
+
+  @Override
+  public int execute() throws HiveException {
+    if (desc.getColumnIndexes() != null) {
+      ColumnTruncateWork truncateWork = new ColumnTruncateWork(desc.getColumnIndexes(), desc.getInputDir(),
+          desc.getOutputDir());
+      truncateWork.setListBucketingCtx(desc.getLbCtx());
+      truncateWork.setMapperCannotSpanPartns(true);
+      DriverContext driverCxt = new DriverContext();
+      ColumnTruncateTask taskExec = new ColumnTruncateTask();
+      taskExec.initialize(context.getQueryState(), null, driverCxt, null);
+      taskExec.setWork(truncateWork);
+      taskExec.setQueryPlan(context.getQueryPlan());
+      Task<? extends Serializable> subtask = taskExec;
+      int ret = taskExec.execute(driverCxt);
+      if (subtask.getException() != null) {
+        context.getTask().setException(subtask.getException());
+      }
+      return ret;
+    }
+
+    String tableName = desc.getTableName();
+    Map<String, String> partSpec = desc.getPartSpec();
+
+    ReplicationSpec replicationSpec = desc.getReplicationSpec();
+    if (!DDLUtils.allowOperationInReplicationScope(context.getDb(), tableName, partSpec, replicationSpec)) {
+      // No truncate: the table is either missing due to a drop/rename that follows the truncate,
+      // or the existing table is newer than our update.
+      LOG.debug("DDLTask: Truncate Table/Partition is skipped as table {} / partition {} is newer than update",
+          tableName, (partSpec == null) ?
+              "null" : FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values())));
+      return 0;
+    }
+
+    try {
+      context.getDb().truncateTable(tableName, partSpec,
+              replicationSpec != null && replicationSpec.isInReplicationScope() ? desc.getWriteId() : 0L);
+    } catch (Exception e) {
+      throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
+    }
+    return 0;
+  }
+}
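
For orientation, here is a hedged sketch of how a plain (non-column) truncate could be wired through the two classes above; the table and database names are illustrative, a Hive handle named db is assumed to be in scope, and only the constructor and execute() signatures are taken from this patch:

    Table table = db.getTable("default", "web_logs");           // assumed example table
    Map<String, String> partSpec = null;                        // truncate the whole table
    TruncateTableDesc desc = new TruncateTableDesc("default.web_logs", partSpec,
        null /* not running under replication */, table);
    // DDLTask2 is expected to route the desc to the operation registered in the static block:
    //   new TruncateTableOperation(context, desc).execute();
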
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java
similarity index 74%
rename from ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java
index 0b91463..8605024 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java
@@ -16,29 +16,30 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.plan;
+package org.apache.hadoop.hive.ql.ddl.table;
 
 import java.io.Serializable;
 import java.util.Map;
 
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
-
 /**
- * UnlockTableDesc.
- *
+ * DDL task description for UNLOCK TABLE commands.
  */
 @Explain(displayName = "Unlock Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class UnlockTableDesc extends DDLDesc implements Serializable {
+public class UnlockTableDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
 
-  private String tableName;
-  private Map<String, String> partSpec;
-
-  public UnlockTableDesc() {
+  static {
+    DDLTask2.registerOperation(UnlockTableDesc.class, UnlockTableOperation.class);
   }
 
+  private final String tableName;
+  private final Map<String, String> partSpec;
+
   public UnlockTableDesc(String tableName, Map<String, String> partSpec) {
     this.tableName = tableName;
     this.partSpec  = partSpec;
@@ -48,15 +49,7 @@ public class UnlockTableDesc extends DDLDesc implements Serializable {
     return tableName;
   }
 
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
   public Map<String, String> getPartSpec() {
     return partSpec;
   }
-
-  public void setPartSpec(Map<String, String> partSpec) {
-    this.partSpec = partSpec;
-  }
 }
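
The lock/unlock descriptions follow the same desc-plus-operation pattern as the rest of the patch, so a generic sketch may be useful while reading on; MyCommandDesc and MyCommandOperation are invented names, and only DDLDesc, DDLOperation, DDLOperationContext, HiveException and DDLTask2.registerOperation are taken from the surrounding code:

    /** Hypothetical description: registers itself against the operation that executes it. */
    public class MyCommandDesc implements DDLDesc, Serializable {
      private static final long serialVersionUID = 1L;

      static {
        DDLTask2.registerOperation(MyCommandDesc.class, MyCommandOperation.class);
      }

      private final String tableName;

      public MyCommandDesc(String tableName) {
        this.tableName = tableName;
      }

      public String getTableName() {
        return tableName;
      }
    }

    /** Hypothetical operation: DDLTask2 is expected to construct it with the context and the desc. */
    public class MyCommandOperation extends DDLOperation {
      private final MyCommandDesc desc;

      public MyCommandOperation(DDLOperationContext context, MyCommandDesc desc) {
        super(context);
        this.desc = desc;
      }

      @Override
      public int execute() throws HiveException {
        LOG.debug("Running hypothetical command on {}", desc.getTableName()); // LOG is inherited from DDLOperation
        return 0;                                                             // 0 means success, as in the operations above
      }
    }
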
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java
similarity index 51%
copy from ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java
copy to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java
index e349a0a..8b70e06 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java
@@ -16,36 +16,29 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl;
+package org.apache.hadoop.hive.ql.ddl.table;
 
-import java.io.DataOutputStream;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * Abstract ancestor class of all DDL Operation classes.
+ * Operation process of unlocking a table.
  */
-public abstract class DDLOperation {
-  protected static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.DDLTask");
-
-  protected final DDLOperationContext context;
+public class UnlockTableOperation extends DDLOperation {
+  private final UnlockTableDesc desc;
 
-  public DDLOperation(DDLOperationContext context) {
-    this.context = context;
+  public UnlockTableOperation(DDLOperationContext context, UnlockTableDesc desc) {
+    super(context);
+    this.desc = desc;
   }
 
-  public abstract int execute() throws HiveException;
-
-  protected DataOutputStream getOutputStream(Path outputFile) throws HiveException {
-    try {
-      FileSystem fs = outputFile.getFileSystem(context.getConf());
-      return fs.create(outputFile);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
+  @Override
+  public int execute() throws HiveException {
+    Context ctx = context.getDriverContext().getCtx();
+    HiveTxnManager txnManager = ctx.getHiveTxnManager();
+    return txnManager.unlockTable(context.getDb(), desc);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java
similarity index 51%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java
index 2c8e1e1..6fc4730 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,27 +16,5 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.parse;
-
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.DDLDesc;
-import org.apache.hadoop.hive.ql.plan.Explain;
-
-@Explain(displayName = "Pre-Insert task", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED })
-public class PreInsertTableDesc extends DDLDesc {
-  private final boolean isOverwrite;
-  private final Table table;
-
-  public PreInsertTableDesc(Table table, boolean overwrite) {
-    this.table = table;
-    this.isOverwrite = overwrite;
-  }
-
-  public Table getTable() {
-    return table;
-  }
-
-  public boolean isOverwrite() {
-    return isOverwrite;
-  }
-}
+/** Table related DDL operation descriptions and operations. */
+package org.apache.hadoop.hive.ql.ddl.table;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index a56695b..f4281bd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import static org.apache.commons.lang.StringUtils.join;
-import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
 
 import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
@@ -28,25 +27,18 @@ import java.io.OutputStreamWriter;
 import java.io.Serializable;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.nio.charset.StandardCharsets;
 import java.sql.SQLException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedSet;
-import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -62,10 +54,7 @@ import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
@@ -74,13 +63,8 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Msck;
 import org.apache.hadoop.hive.metastore.MsckInfo;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
-import org.apache.hadoop.hive.metastore.PartitionManagementTask;
-import org.apache.hadoop.hive.metastore.StatObjectConverter;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CompactionResponse;
 import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -93,14 +77,6 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
@@ -138,8 +114,6 @@ import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
-import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask;
-import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork;
 import org.apache.hadoop.hive.ql.lockmgr.DbLockManager;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
@@ -147,30 +121,20 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
-import org.apache.hadoop.hive.ql.metadata.CheckConstraint;
-import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
-import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
-import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
-import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo;
-import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.metadata.UniqueConstraint;
 import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils;
 import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;
 import org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataTable;
 import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.CalcitePlanner;
 import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
 import org.apache.hadoop.hive.ql.parse.ParseUtils;
-import org.apache.hadoop.hive.ql.parse.PreInsertTableDesc;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
@@ -185,20 +149,16 @@ import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
 import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc;
 import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
-import org.apache.hadoop.hive.ql.plan.ColStatistics;
 import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
 import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
-import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
-import org.apache.hadoop.hive.ql.plan.DescTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc;
 import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc;
 import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc;
@@ -209,7 +169,6 @@ import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc;
 import org.apache.hadoop.hive.ql.plan.KillQueryDesc;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
 import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
-import org.apache.hadoop.hive.ql.plan.LockTableDesc;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.MsckDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
@@ -225,20 +184,13 @@ import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
 import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowConfDesc;
-import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
 import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
 import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc;
-import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
-import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
-import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc;
 import org.apache.hadoop.hive.ql.plan.TezWork;
-import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils;
@@ -253,12 +205,9 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObje
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveV1Authorizer;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
-import org.apache.hadoop.hive.serde2.SerDeSpec;
 import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
 import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
 import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe;
@@ -267,7 +216,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
@@ -276,12 +224,10 @@ import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.tools.HadoopArchives;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hive.common.util.AnnotationUtils;
-import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.hive.common.util.ReflectionUtil;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.stringtemplate.v4.ST;
 
 /**
  * DDLTask implementation.
@@ -302,11 +248,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
 
   private MetaDataFormatter formatter;
   private final HiveAuthorizationTranslator defaultAuthorizationTranslator = new DefaultHiveAuthorizationTranslator();
-  private Task<? extends Serializable> subtask = null;
-
-  public Task<? extends Serializable> getSubtask() {
-    return subtask;
-  }
 
   @Override
   public boolean requireLock() {
@@ -344,19 +285,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     try {
       db = Hive.get(conf);
 
-      CreateTableDesc crtTbl = work.getCreateTblDesc();
-      if (crtTbl != null) {
-        return createTable(db, crtTbl);
-      }
-
-      CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
-      if (crtTblLike != null) {
-        return createTableLike(db, crtTblLike);
-      }
-
-      DropTableDesc dropTbl = work.getDropTblDesc();
-      if (dropTbl != null) {
-        dropTableOrPartitions(db, dropTbl);
+      DropPartitionDesc dropPartition = work.getDropPartitionDesc();
+      if (dropPartition != null) {
+        dropPartitions(db, dropPartition);
         return 0;
       }
 
@@ -410,36 +341,16 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         return msck(db, msckDesc);
       }
 
-      DescTableDesc descTbl = work.getDescTblDesc();
-      if (descTbl != null) {
-        return describeTable(db, descTbl);
-      }
-
       DescFunctionDesc descFunc = work.getDescFunctionDesc();
       if (descFunc != null) {
         return describeFunction(db, descFunc);
       }
 
-      ShowTablesDesc showTbls = work.getShowTblsDesc();
-      if (showTbls != null) {
-        return showTablesOrViews(db, showTbls);
-      }
-
       ShowColumnsDesc showCols = work.getShowColumnsDesc();
       if (showCols != null) {
         return showColumns(db, showCols);
       }
 
-      ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
-      if (showTblStatus != null) {
-        return showTableStatus(db, showTblStatus);
-      }
-
-      ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
-      if (showTblProperties != null) {
-        return showTableProperties(db, showTblProperties);
-      }
-
       ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
       if (showFuncs != null) {
         return showFunctions(db, showFuncs);
@@ -465,31 +376,11 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         return abortTxns(db, abortTxnsDesc);
       }
 
-      LockTableDesc lockTbl = work.getLockTblDesc();
-      if (lockTbl != null) {
-        return lockTable(db, lockTbl);
-      }
-
-      UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
-      if (unlockTbl != null) {
-        return unlockTable(db, unlockTbl);
-      }
-
       ShowPartitionsDesc showParts = work.getShowPartsDesc();
       if (showParts != null) {
         return showPartitions(db, showParts);
       }
 
-      ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
-      if (showCreateDb != null) {
-        return showCreateDatabase(db, showCreateDb);
-      }
-
-      ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
-      if (showCreateTbl != null) {
-        return showCreateTable(db, showCreateTbl);
-      }
-
       ShowConfDesc showConf = work.getShowConfDesc();
       if (showConf != null) {
         return showConf(db, showConf);
@@ -534,11 +425,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         return alterTableAlterPart(db, alterPartDesc);
       }
 
-      TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
-      if (truncateTableDesc != null) {
-        return truncateTable(db, truncateTableDesc);
-      }
-
       AlterTableExchangePartition alterTableExchangePartition =
           work.getAlterTableExchangePartition();
       if (alterTableExchangePartition != null) {
@@ -553,10 +439,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       if (insertCommitHookDesc != null) {
         return insertCommitWork(db, insertCommitHookDesc);
       }
-      PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
-      if (preInsertTableDesc != null) {
-        return preInsertWork(db, preInsertTableDesc);
-      }
 
       KillQueryDesc killQueryDesc = work.getKillQueryDesc();
       if (killQueryDesc != null) {
@@ -802,20 +684,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
-  private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException {
-    try{
-      HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook();
-      if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
-        return 0;
-      }
-      DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook;
-      hiveMetaHook.preInsertTable(preInsertTableDesc.getTable().getTTable(), preInsertTableDesc.isOverwrite());
-    } catch (MetaException e) {
-      throw new HiveException(e);
-    }
-    return 0;
-  }
-
   private int insertCommitWork(Hive db, InsertCommitHookDesc insertCommitHookDesc) throws MetaException {
     boolean failed = true;
     HiveMetaHook hook = insertCommitHookDesc.getTable().getStorageHandler().getMetaHook();
@@ -964,7 +832,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
 
     // initialize the task and execute
     task.initialize(queryState, getQueryPlan(), driverCxt, opContext);
-    subtask = task;
+    Task<? extends Serializable> subtask = task;
     int ret = task.execute(driverCxt);
     if (subtask.getException() != null) {
       setException(subtask.getException());
@@ -2109,373 +1977,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
-  private int showCreateDatabase(Hive db, ShowCreateDatabaseDesc showCreateDb) throws HiveException {
-    DataOutputStream outStream = getOutputStream(showCreateDb.getResFile());
-    try {
-      String dbName = showCreateDb.getDatabaseName();
-      return showCreateDatabase(db, outStream, dbName);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    } finally {
-      IOUtils.closeStream(outStream);
-    }
-  }
-
-  private int showCreateDatabase(Hive db, DataOutputStream outStream, String databaseName)
-      throws Exception {
-    Database database = db.getDatabase(databaseName);
-
-    StringBuilder createDb_str = new StringBuilder();
-    createDb_str.append("CREATE DATABASE `").append(database.getName()).append("`\n");
-    if (database.getDescription() != null) {
-      createDb_str.append("COMMENT\n  '");
-      createDb_str.append(
-          HiveStringUtils.escapeHiveCommand(database.getDescription())).append("'\n");
-    }
-    createDb_str.append("LOCATION\n  '");
-    createDb_str.append(database.getLocationUri()).append("'\n");
-    String propertiesToString = propertiesToString(database.getParameters(), null);
-    if (!propertiesToString.isEmpty()) {
-      createDb_str.append("WITH DBPROPERTIES (\n");
-      createDb_str.append(propertiesToString).append(")\n");
-    }
-
-    outStream.write(createDb_str.toString().getBytes("UTF-8"));
-    return 0;
-  }
-
-  /**
-   * Write a statement of how to create a table to a file.
-   *
-   * @param db
-   *          The database in question.
-   * @param showCreateTbl
-   *          This is the table we're interested in.
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   */
-  private int showCreateTable(Hive db, ShowCreateTableDesc showCreateTbl) throws HiveException {
-    // get the create table statement for the table and populate the output
-    DataOutputStream outStream = getOutputStream(showCreateTbl.getResFile());
-    try {
-      String tableName = showCreateTbl.getTableName();
-      return showCreateTable(db, outStream, tableName);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    } finally {
-      IOUtils.closeStream(outStream);
-    }
-  }
-
-  private int showCreateTable(Hive db, DataOutputStream outStream, String tableName)
-      throws HiveException {
-    final String EXTERNAL = "external";
-    final String TEMPORARY = "temporary";
-    final String LIST_COLUMNS = "columns";
-    final String TBL_COMMENT = "tbl_comment";
-    final String LIST_PARTITIONS = "partitions";
-    final String SORT_BUCKET = "sort_bucket";
-    final String SKEWED_INFO = "tbl_skewedinfo";
-    final String ROW_FORMAT = "row_format";
-    final String TBL_LOCATION = "tbl_location";
-    final String TBL_PROPERTIES = "tbl_properties";
-    boolean needsLocation = true;
-    StringBuilder createTab_str = new StringBuilder();
-
-    Table tbl = db.getTable(tableName, false);
-    List<String> duplicateProps = new ArrayList<String>();
-    try {
-      needsLocation = doesTableNeedLocation(tbl);
-
-      if (tbl.isView()) {
-        String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " +
-            tbl.getViewExpandedText();
-        outStream.write(createTab_stmt.getBytes(StandardCharsets.UTF_8));
-        return 0;
-      }
-
-      createTab_str.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `");
-      createTab_str.append(tableName + "`(\n");
-      createTab_str.append("<" + LIST_COLUMNS + ">)\n");
-      createTab_str.append("<" + TBL_COMMENT + ">\n");
-      createTab_str.append("<" + LIST_PARTITIONS + ">\n");
-      createTab_str.append("<" + SORT_BUCKET + ">\n");
-      createTab_str.append("<" + SKEWED_INFO + ">\n");
-      createTab_str.append("<" + ROW_FORMAT + ">\n");
-      if (needsLocation) {
-        createTab_str.append("LOCATION\n");
-        createTab_str.append("<" + TBL_LOCATION + ">\n");
-      }
-      createTab_str.append("TBLPROPERTIES (\n");
-      createTab_str.append("<" + TBL_PROPERTIES + ">)\n");
-      ST createTab_stmt = new ST(createTab_str.toString());
-
-      // For cases where the table is temporary
-      String tbl_temp = "";
-      if (tbl.isTemporary()) {
-        duplicateProps.add("TEMPORARY");
-        tbl_temp = "TEMPORARY ";
-      }
-      // For cases where the table is external
-      String tbl_external = "";
-      if (tbl.getTableType() == TableType.EXTERNAL_TABLE) {
-        duplicateProps.add("EXTERNAL");
-        tbl_external = "EXTERNAL ";
-      }
-
-      // Columns
-      String tbl_columns = "";
-      List<FieldSchema> cols = tbl.getCols();
-      List<String> columns = new ArrayList<String>();
-      for (FieldSchema col : cols) {
-        String columnDesc = "  `" + col.getName() + "` " + col.getType();
-        if (col.getComment() != null) {
-          columnDesc = columnDesc + " COMMENT '"
-              + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'";
-        }
-        columns.add(columnDesc);
-      }
-      tbl_columns = StringUtils.join(columns, ", \n");
-
-      // Table comment
-      String tbl_comment = "";
-      String tabComment = tbl.getProperty("comment");
-      if (tabComment != null) {
-        duplicateProps.add("comment");
-        tbl_comment = "COMMENT '"
-            + HiveStringUtils.escapeHiveCommand(tabComment) + "'";
-      }
-
-      // Partitions
-      String tbl_partitions = "";
-      List<FieldSchema> partKeys = tbl.getPartitionKeys();
-      if (partKeys.size() > 0) {
-        tbl_partitions += "PARTITIONED BY ( \n";
-        List<String> partCols = new ArrayList<String>();
-        for (FieldSchema partKey : partKeys) {
-          String partColDesc = "  `" + partKey.getName() + "` " + partKey.getType();
-          if (partKey.getComment() != null) {
-            partColDesc = partColDesc + " COMMENT '"
-                + HiveStringUtils.escapeHiveCommand(partKey.getComment()) + "'";
-          }
-          partCols.add(partColDesc);
-        }
-        tbl_partitions += StringUtils.join(partCols, ", \n");
-        tbl_partitions += ")";
-      }
-
-      // Clusters (Buckets)
-      String tbl_sort_bucket = "";
-      List<String> buckCols = tbl.getBucketCols();
-      if (buckCols.size() > 0) {
-        duplicateProps.add("SORTBUCKETCOLSPREFIX");
-        tbl_sort_bucket += "CLUSTERED BY ( \n  ";
-        tbl_sort_bucket += StringUtils.join(buckCols, ", \n  ");
-        tbl_sort_bucket += ") \n";
-        List<Order> sortCols = tbl.getSortCols();
-        if (sortCols.size() > 0) {
-          tbl_sort_bucket += "SORTED BY ( \n";
-          // Order
-          List<String> sortKeys = new ArrayList<String>();
-          for (Order sortCol : sortCols) {
-            String sortKeyDesc = "  " + sortCol.getCol() + " ";
-            if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
-              sortKeyDesc = sortKeyDesc + "ASC";
-            }
-            else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
-              sortKeyDesc = sortKeyDesc + "DESC";
-            }
-            sortKeys.add(sortKeyDesc);
-          }
-          tbl_sort_bucket += StringUtils.join(sortKeys, ", \n");
-          tbl_sort_bucket += ") \n";
-        }
-        tbl_sort_bucket += "INTO " + tbl.getNumBuckets() + " BUCKETS";
-      }
-
-      // Skewed Info
-      StringBuilder tbl_skewedinfo = new StringBuilder();
-      SkewedInfo skewedInfo = tbl.getSkewedInfo();
-      if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) {
-        tbl_skewedinfo.append("SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n");
-        tbl_skewedinfo.append("  ON (");
-        List<String> colValueList = new ArrayList<String>();
-        for (List<String> colValues : skewedInfo.getSkewedColValues()) {
-          colValueList.add("('" + StringUtils.join(colValues, "','") + "')");
-        }
-        tbl_skewedinfo.append(StringUtils.join(colValueList, ",") + ")");
-        if (tbl.isStoredAsSubDirectories()) {
-          tbl_skewedinfo.append("\n  STORED AS DIRECTORIES");
-        }
-      }
-
-      // Row format (SerDe)
-      StringBuilder tbl_row_format = new StringBuilder();
-      StorageDescriptor sd = tbl.getTTable().getSd();
-      SerDeInfo serdeInfo = sd.getSerdeInfo();
-      Map<String, String> serdeParams = serdeInfo.getParameters();
-      tbl_row_format.append("ROW FORMAT SERDE \n");
-      tbl_row_format.append("  '"
-          + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n");
-      if (tbl.getStorageHandler() == null) {
-        // If serialization.format property has the default value, it will not to be included in
-        // SERDE properties
-        if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(
-            serdeConstants.SERIALIZATION_FORMAT))){
-          serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT);
-        }
-        if (!serdeParams.isEmpty()) {
-          appendSerdeParams(tbl_row_format, serdeParams).append(" \n");
-        }
-        tbl_row_format.append("STORED AS INPUTFORMAT \n  '"
-            + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n");
-        tbl_row_format.append("OUTPUTFORMAT \n  '"
-            + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'");
-      } else {
-        duplicateProps.add(META_TABLE_STORAGE);
-        tbl_row_format.append("STORED BY \n  '"
-            + HiveStringUtils.escapeHiveCommand(tbl.getParameters().get(
-            META_TABLE_STORAGE)) + "' \n");
-        // SerDe Properties
-        if (!serdeParams.isEmpty()) {
-          appendSerdeParams(tbl_row_format, serdeInfo.getParameters());
-        }
-      }
-      String tbl_location = "  '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'";
-
-      // Table properties
-      duplicateProps.addAll(StatsSetupConst.TABLE_PARAMS_STATS_KEYS);
-      String tbl_properties = propertiesToString(tbl.getParameters(), duplicateProps);
-
-      createTab_stmt.add(TEMPORARY, tbl_temp);
-      createTab_stmt.add(EXTERNAL, tbl_external);
-      createTab_stmt.add(LIST_COLUMNS, tbl_columns);
-      createTab_stmt.add(TBL_COMMENT, tbl_comment);
-      createTab_stmt.add(LIST_PARTITIONS, tbl_partitions);
-      createTab_stmt.add(SORT_BUCKET, tbl_sort_bucket);
-      createTab_stmt.add(SKEWED_INFO, tbl_skewedinfo);
-      createTab_stmt.add(ROW_FORMAT, tbl_row_format);
-      // Table location should not be printed with hbase backed tables
-      if (needsLocation) {
-        createTab_stmt.add(TBL_LOCATION, tbl_location);
-      }
-      createTab_stmt.add(TBL_PROPERTIES, tbl_properties);
-
-      outStream.write(createTab_stmt.render().getBytes(StandardCharsets.UTF_8));
-    } catch (IOException e) {
-      LOG.info("show create table: ", e);
-      return 1;
-    }
-
-    return 0;
-  }
-
-  private String propertiesToString(Map<String, String> props, List<String> exclude) {
-    String prop_string = "";
-    if (!props.isEmpty()) {
-      Map<String, String> properties = new TreeMap<String, String>(props);
-      List<String> realProps = new ArrayList<String>();
-      for (String key : properties.keySet()) {
-        if (properties.get(key) != null && (exclude == null || !exclude.contains(key))) {
-          realProps.add("  '" + key + "'='" +
-              HiveStringUtils.escapeHiveCommand(properties.get(key)) + "'");
-        }
-      }
-      prop_string += StringUtils.join(realProps, ", \n");
-    }
-    return prop_string;
-  }
-
-  public static StringBuilder appendSerdeParams(
-      StringBuilder builder, Map<String, String> serdeParam) {
-    serdeParam = new TreeMap<String, String>(serdeParam);
-    builder.append("WITH SERDEPROPERTIES ( \n");
-    List<String> serdeCols = new ArrayList<String>();
-    for (Entry<String, String> entry : serdeParam.entrySet()) {
-      serdeCols.add("  '" + entry.getKey() + "'='"
-          + HiveStringUtils.escapeHiveCommand(entry.getValue()) + "'");
-    }
-    builder.append(StringUtils.join(serdeCols, ", \n")).append(')');
-    return builder;
-  }
-
-  /**
-   * Write a list of the tables/views in the database to a file.
-   *
-   * @param db
-   *          The database in context.
-   * @param showDesc
-   *        A ShowTablesDesc for tables or views we're interested in.
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   */
-  private int showTablesOrViews(Hive db, ShowTablesDesc showDesc) throws HiveException {
-    // get the tables/views for the desired pattern - populate the output stream
-    List<String> tableNames  = null;
-    List<Table> tableObjects = null;
-
-    TableType type       = showDesc.getType(); // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs
-    String dbName        = showDesc.getDbName();
-    String pattern       = showDesc.getPattern(); // if null, all tables/views are returned
-    TableType typeFilter = showDesc.getTypeFilter();
-    String resultsFile   = showDesc.getResFile();
-    boolean isExtended   = showDesc.isExtended();
-
-    if (!db.databaseExists(dbName)) {
-      throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName);
-    }
-
-    LOG.debug("pattern: {}", pattern);
-    LOG.debug("typeFilter: {}", typeFilter);
-    if (type == null) {
-      if (isExtended) {
-        tableObjects = new ArrayList<>();
-        tableObjects.addAll(db.getTableObjectsByType(dbName, pattern, typeFilter));
-        LOG.debug("Found {} table(s) matching the SHOW EXTENDED TABLES statement.", tableObjects.size());
-      } else {
-        tableNames = db.getTablesByType(dbName, pattern, typeFilter);
-        LOG.debug("Found {} table(s) matching the SHOW TABLES statement.", tableNames.size());
-      }
-    } else if (type == TableType.MATERIALIZED_VIEW) {
-      tableObjects = new ArrayList<>();
-      tableObjects.addAll(db.getMaterializedViewObjectsByPattern(dbName, pattern));
-      LOG.debug("Found {} materialized view(s) matching the SHOW MATERIALIZED VIEWS statement.", tableObjects.size());
-    } else if (type == TableType.VIRTUAL_VIEW) {
-      tableNames = db.getTablesByType(dbName, pattern, type);
-      LOG.debug("Found {} view(s) matching the SHOW VIEWS statement.", tableNames.size());
-    } else {
-      throw new HiveException("Option not recognized in SHOW TABLES/VIEWS/MATERIALIZED VIEWS");
-    }
-
-    // write the results in the file
-    DataOutputStream outStream = null;
-    try {
-      Path resFile = new Path(resultsFile);
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-      // Sort by name and print
-      if (tableNames != null) {
-        SortedSet<String> sortedSet = new TreeSet<String>(tableNames);
-        formatter.showTables(outStream, sortedSet);
-      } else {
-        Collections.sort(tableObjects, Comparator.comparing(Table::getTableName));
-        if (isExtended) {
-          formatter.showTablesExtended(outStream, tableObjects);
-        } else {
-          formatter.showMaterializedViews(outStream, tableObjects);
-        }
-      }
-      outStream.close();
-    } catch (Exception e) {
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database" + dbName);
-    } finally {
-      IOUtils.closeStream(outStream);
-    }
-    return 0;
-  }
-
   /**
    * Write a list of the columns in the table to a file.
    *
@@ -2949,38 +2450,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
-   /**
-   * Lock the table/partition specified
-   * @param db
-   *
-   * @param lockTbl
-   *          the table/partition to be locked along with the mode
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   */
-  private int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
-    Context ctx = driverContext.getCtx();
-    HiveTxnManager txnManager = ctx.getHiveTxnManager();
-    return txnManager.lockTable(db, lockTbl);
-  }
-
-  /**
-   * Unlock the table/partition specified
-   * @param db
-   *
-   * @param unlockTbl
-   *          the table/partition to be unlocked
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   */
-  private int unlockTable(Hive db, UnlockTableDesc unlockTbl) throws HiveException {
-    Context ctx = driverContext.getCtx();
-    HiveTxnManager txnManager = ctx.getHiveTxnManager();
-    return txnManager.unlockTable(db, unlockTbl);
-  }
-
   /**
    * Shows a description of a function.
    * @param db
@@ -3055,117 +2524,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
-  /**
-   * Write the status of tables to a file.
-   *
-   * @param db
-   *          The database in question.
-   * @param showTblStatus
-   *          tables we are interested in
-   * @return Return 0 when execution succeeds and above 0 if it fails.
-   */
-  private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) throws HiveException {
-    // get the tables for the desired pattern - populate the output stream
-    List<Table> tbls = new ArrayList<Table>();
-    Map<String, String> part = showTblStatus.getPartSpec();
-    Partition par = null;
-    if (part != null) {
-      Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus.getPattern());
-      par = db.getPartition(tbl, part, false);
-      if (par == null) {
-        throw new HiveException("Partition " + part + " for table "
-            + showTblStatus.getPattern() + " does not exist.");
-      }
-      tbls.add(tbl);
-    } else {
-      LOG.debug("pattern: {}", showTblStatus.getPattern());
-      List<String> tblStr = db.getTablesForDb(showTblStatus.getDbName(),
-          showTblStatus.getPattern());
-      SortedSet<String> sortedTbls = new TreeSet<String>(tblStr);
-      Iterator<String> iterTbls = sortedTbls.iterator();
-      while (iterTbls.hasNext()) {
-        // create a row per table name
-        String tblName = iterTbls.next();
-        Table tbl = db.getTable(showTblStatus.getDbName(), tblName);
-        tbls.add(tbl);
-      }
-      LOG.info("Found {} table(s) matching the SHOW TABLE EXTENDED statement.", tblStr.size());
-    }
-
-    // write the results in the file
-    DataOutputStream outStream = getOutputStream(showTblStatus.getResFile());
-    try {
-      formatter.showTableStatus(outStream, db, conf, tbls, part, par);
-    } catch (Exception e) {
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show table status");
-    } finally {
-      IOUtils.closeStream(outStream);
-    }
-    return 0;
-  }
-
-  /**
-   * Write the properties of a table to a file.
-   *
-   * @param db
-   *          The database in question.
-   * @param showTblPrpt
-   *          This is the table we're interested in.
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   */
-  private int showTableProperties(Hive db, ShowTblPropertiesDesc showTblPrpt) throws HiveException {
-    String tableName = showTblPrpt.getTableName();
-
-    // show table properties - populate the output stream
-    Table tbl = db.getTable(tableName, false);
-    try {
-      if (tbl == null) {
-        String errMsg = "Table " + tableName + " does not exist";
-        writeToFile(errMsg, showTblPrpt.getResFile());
-        return 0;
-      }
-
-      LOG.info("DDLTask: show properties for {}", tableName);
-
-      StringBuilder builder = new StringBuilder();
-      String propertyName = showTblPrpt.getPropertyName();
-      if (propertyName != null) {
-        String propertyValue = tbl.getProperty(propertyName);
-        if (propertyValue == null) {
-          String errMsg = "Table " + tableName + " does not have property: " + propertyName;
-          builder.append(errMsg);
-        }
-        else {
-          appendNonNull(builder, propertyName, true);
-          appendNonNull(builder, propertyValue);
-        }
-      }
-      else {
-        Map<String, String> properties = new TreeMap<String, String>(tbl.getParameters());
-        for (Entry<String, String> entry : properties.entrySet()) {
-          appendNonNull(builder, entry.getKey(), true);
-          appendNonNull(builder, entry.getValue());
-        }
-      }
-
-      LOG.info("DDLTask: written data for showing properties of {}", tableName);
-      writeToFile(builder.toString(), showTblPrpt.getResFile());
-
-    } catch (FileNotFoundException e) {
-      LOG.info("show table properties: ", e);
-      return 1;
-    } catch (IOException e) {
-      LOG.info("show table properties: ", e);
-      return 1;
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
-
-    return 0;
-  }
-
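The properties output produced above is plain text: one property per row, sorted by key, with key and value joined by the class's separator character (a tab) via the appendNonNull helper kept further down in this class. A pure-Java sketch of that row format (helper and class names are illustrative):

    import java.util.Map;
    import java.util.TreeMap;

    public final class TablePropertiesFormatSketch {
      // Illustrative: mirrors the removed helper's output -- properties sorted by
      // key, each row "key<TAB>value", rows separated by newlines.
      static String format(Map<String, String> params) {
        StringBuilder builder = new StringBuilder();
        for (Map.Entry<String, String> entry : new TreeMap<>(params).entrySet()) {
          if (builder.length() > 0) {
            builder.append('\n');
          }
          builder.append(entry.getKey()).append('\t').append(entry.getValue());
        }
        return builder.toString();
      }
    }
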
   private void writeToFile(String data, String file) throws IOException {
     Path resFile = new Path(file);
     FileSystem fs = resFile.getFileSystem(conf);
@@ -3182,211 +2540,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     }
   }
 
-  /**
-   * Write the description of a table to a file.
-   *
-   * @param db
-   *          The database in question.
-   * @param descTbl
-   *          This is the table we're interested in.
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   * @throws MetaException
-   */
-  private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, MetaException {
-    String colPath = descTbl.getColumnPath();
-    String tableName = descTbl.getTableName();
-
-    // describe the table - populate the output stream
-    Table tbl = db.getTable(tableName, false);
-    if (tbl == null) {
-      throw new HiveException(ErrorMsg.INVALID_TABLE, tableName);
-    }
-    Partition part = null;
-    if (descTbl.getPartSpec() != null) {
-      part = db.getPartition(tbl, descTbl.getPartSpec(), false);
-      if (part == null) {
-        throw new HiveException(ErrorMsg.INVALID_PARTITION,
-            StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName);
-      }
-      tbl = part.getTable();
-    }
-
-    DataOutputStream outStream = getOutputStream(descTbl.getResFile());
-    try {
-      LOG.debug("DDLTask: got data for {}", tableName);
-
-      List<FieldSchema> cols = null;
-      List<ColumnStatisticsObj> colStats = null;
-
-      Deserializer deserializer = tbl.getDeserializer(true);
-      if (deserializer instanceof AbstractSerDe) {
-        String errorMsgs = ((AbstractSerDe) deserializer).getConfigurationErrors();
-        if (errorMsgs != null && !errorMsgs.isEmpty()) {
-          throw new SQLException(errorMsgs);
-        }
-      }
-
-      if (colPath.equals(tableName)) {
-        cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ?
-            tbl.getCols() : part.getCols();
-
-        if (!descTbl.isFormatted()) {
-          cols.addAll(tbl.getPartCols());
-        }
-
-        if (tbl.isPartitioned() && part == null) {
-          // No partition specified for the partitioned table, let's fetch all.
-          Map<String,String> tblProps = tbl.getParameters() == null ? new HashMap<String,String>() : tbl.getParameters();
-          Map<String, Long> valueMap = new HashMap<>();
-          Map<String, Boolean> stateMap = new HashMap<>();
-          for (String stat : StatsSetupConst.SUPPORTED_STATS) {
-            valueMap.put(stat, 0L);
-            stateMap.put(stat, true);
-          }
-          PartitionIterable parts = new PartitionIterable(db, tbl, null, conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
-          int numParts = 0;
-          for (Partition partition : parts) {
-            Map<String, String> props = partition.getParameters();
-            Boolean state = StatsSetupConst.areBasicStatsUptoDate(props);
-            for (String stat : StatsSetupConst.SUPPORTED_STATS) {
-              stateMap.put(stat, stateMap.get(stat) && state);
-              if (props != null && props.get(stat) != null) {
-                valueMap.put(stat, valueMap.get(stat) + Long.parseLong(props.get(stat)));
-              }
-            }
-            numParts++;
-          }
-          for (String stat : StatsSetupConst.SUPPORTED_STATS) {
-            StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(stateMap.get(stat)));
-            tblProps.put(stat, valueMap.get(stat).toString());
-          }
-          tblProps.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(numParts));
-          tbl.setParameters(tblProps);
-        }
-      } else {
-        if (descTbl.isFormatted()) {
-          // when a column name is specified in the describe table DDL, colPath
-          // will be table_name.column_name
-          String colName = colPath.split("\\.")[1];
-          String[] dbTab = Utilities.getDbTableName(tableName);
-          List<String> colNames = new ArrayList<String>();
-          colNames.add(colName.toLowerCase());
-          if (null == part) {
-            if (tbl.isPartitioned()) {
-              Map<String,String> tblProps = tbl.getParameters() == null ? new HashMap<String,String>() : tbl.getParameters();
-              if (tbl.isPartitionKey(colNames.get(0))) {
-                FieldSchema partCol = tbl.getPartColByName(colNames.get(0));
-                cols = Collections.singletonList(partCol);
-                PartitionIterable parts = new PartitionIterable(db, tbl, null, conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
-                ColumnInfo ci = new ColumnInfo(partCol.getName(),TypeInfoUtils.getTypeInfoFromTypeString(partCol.getType()),null,false);
-                ColStatistics cs = StatsUtils.getColStatsForPartCol(ci, parts, conf);
-                ColumnStatisticsData data = new ColumnStatisticsData();
-                ColStatistics.Range r = cs.getRange();
-                StatObjectConverter.fillColumnStatisticsData(partCol.getType(), data, r == null ? null : r.minValue, r == null ? null : r.maxValue,
-                    r == null ? null : r.minValue, r == null ? null : r.maxValue, r == null ? null : r.minValue.toString(), r == null ? null : r.maxValue.toString(),
-                    cs.getNumNulls(), cs.getCountDistint(), null, cs.getAvgColLen(), cs.getAvgColLen(), cs.getNumTrues(), cs.getNumFalses());
-                ColumnStatisticsObj cso = new ColumnStatisticsObj(partCol.getName(), partCol.getType(), data);
-                colStats = Collections.singletonList(cso);
-                StatsSetupConst.setColumnStatsState(tblProps, colNames);
-              } else {
-                cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
-                List<String> parts = db.getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), (short) -1);
-                AggrStats aggrStats = db.getAggrColStatsFor(
-                    dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false);
-                colStats = aggrStats.getColStats();
-                if (parts.size() == aggrStats.getPartsFound()) {
-                  StatsSetupConst.setColumnStatsState(tblProps, colNames);
-                } else {
-                  StatsSetupConst.removeColumnStatsState(tblProps, colNames);
-                }
-              }
-              tbl.setParameters(tblProps);
-            } else {
-              cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
-              colStats = db.getTableColumnStatistics(
-                  dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false);
-            }
-          } else {
-            List<String> partitions = new ArrayList<String>();
-            partitions.add(part.getName());
-            cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
-            colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(),
-                dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName());
-          }
-        } else {
-          cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
-        }
-      }
-      PrimaryKeyInfo pkInfo = null;
-      ForeignKeyInfo fkInfo = null;
-      UniqueConstraint ukInfo = null;
-      NotNullConstraint nnInfo = null;
-      DefaultConstraint dInfo = null;
-      CheckConstraint cInfo = null;
-      StorageHandlerInfo storageHandlerInfo = null;
-      if (descTbl.isExt() || descTbl.isFormatted()) {
-        pkInfo = db.getPrimaryKeys(tbl.getDbName(), tbl.getTableName());
-        fkInfo = db.getForeignKeys(tbl.getDbName(), tbl.getTableName());
-        ukInfo = db.getUniqueConstraints(tbl.getDbName(), tbl.getTableName());
-        nnInfo = db.getNotNullConstraints(tbl.getDbName(), tbl.getTableName());
-        dInfo = db.getDefaultConstraints(tbl.getDbName(), tbl.getTableName());
-        cInfo = db.getCheckConstraints(tbl.getDbName(), tbl.getTableName());
-        storageHandlerInfo = db.getStorageHandlerInfo(tbl);
-      }
-      fixDecimalColumnTypeName(cols);
-      // Information for materialized views
-      if (tbl.isMaterializedView()) {
-        final String validTxnsList = db.getConf().get(ValidTxnList.VALID_TXNS_KEY);
-        if (validTxnsList != null) {
-          final List<String> tablesUsed =
-              new ArrayList<>(tbl.getCreationMetadata().getTablesUsed());
-          final ValidTxnWriteIdList currentTxnWriteIds =
-              SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList);
-          final long defaultTimeWindow =
-              HiveConf.getTimeVar(db.getConf(), HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW,
-                  TimeUnit.MILLISECONDS);
-          tbl.setOutdatedForRewriting(Hive.isOutdatedMaterializedView(tbl,
-              currentTxnWriteIds, defaultTimeWindow, tablesUsed, false));
-        }
-      }
-      // In case the query is served by HiveServer2, don't pad it with spaces,
-      // as HiveServer2 output is consumed by JDBC/ODBC clients.
-      boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
-      formatter.describeTable(outStream, colPath, tableName, tbl, part,
-          cols, descTbl.isFormatted(), descTbl.isExt(), isOutputPadded,
-          colStats, pkInfo, fkInfo, ukInfo, nnInfo, dInfo, cInfo,
-          storageHandlerInfo);
-
-      LOG.debug("DDLTask: written data for {}", tableName);
-
-    } catch (SQLException e) {
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName);
-    } finally {
-      IOUtils.closeStream(outStream);
-    }
-
-    return 0;
-  }
-
-  /**
-   * Fix the type name of a column of type decimal w/o precision/scale specified. This makes
-   * the describe table show "decimal(10,0)" instead of "decimal" even if the type stored
-   * in the metastore is "decimal", which is possible with previous Hive versions.
-   *
-   * @param cols columns to be fixed as such
-   */
-  private static void fixDecimalColumnTypeName(List<FieldSchema> cols) {
-    for (FieldSchema col : cols) {
-      if (serdeConstants.DECIMAL_TYPE_NAME.equals(col.getType())) {
-        col.setType(DecimalTypeInfo.getQualifiedName(HiveDecimal.USER_DEFAULT_PRECISION,
-            HiveDecimal.USER_DEFAULT_SCALE));
-      }
-    }
-  }
-
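For reference, the decimal-qualification behaviour described above as a self-contained helper, with the imports the original relied on (class and method names are illustrative; with the stock defaults the rewritten type name is "decimal(10,0)", as the removed javadoc notes):

    import java.util.List;

    import org.apache.hadoop.hive.common.type.HiveDecimal;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;

    public final class DecimalTypeNameSketch {
      // Illustrative: a bare "decimal" column type (possible in metadata written by
      // older Hive versions) is rewritten with the default precision and scale.
      static void qualifyBareDecimals(List<FieldSchema> cols) {
        for (FieldSchema col : cols) {
          if (serdeConstants.DECIMAL_TYPE_NAME.equals(col.getType())) {
            col.setType(DecimalTypeInfo.getQualifiedName(
                HiveDecimal.USER_DEFAULT_PRECISION, HiveDecimal.USER_DEFAULT_SCALE));
          }
        }
      }
    }
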
-  static String writeGrantInfo(List<HivePrivilegeInfo> privileges, boolean testMode) {
+  private String writeGrantInfo(List<HivePrivilegeInfo> privileges, boolean testMode) {
     if (privileges == null || privileges.isEmpty()) {
       return "";
     }
@@ -3425,23 +2579,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return builder.toString();
   }
 
-  static String writeRoleGrantsInfo(List<RolePrincipalGrant> roleGrants, boolean testMode) {
-    if (roleGrants == null || roleGrants.isEmpty()) {
-      return "";
-    }
-    StringBuilder builder = new StringBuilder();
-    //sort the list to get sorted (deterministic) output (for ease of testing)
-    Collections.sort(roleGrants);
-    for (RolePrincipalGrant roleGrant : roleGrants) {
-      appendNonNull(builder, roleGrant.getRoleName(), true);
-      appendNonNull(builder, roleGrant.isGrantOption());
-      appendNonNull(builder, testMode ? -1 : roleGrant.getGrantTime() * 1000L);
-      appendNonNull(builder, roleGrant.getGrantorName());
-    }
-    return builder.toString();
-  }
-
-  static String writeRolesGrantedInfo(List<HiveRoleGrant> roles, boolean testMode) {
+  private String writeRolesGrantedInfo(List<HiveRoleGrant> roles, boolean testMode) {
     if (roles == null || roles.isEmpty()) {
       return "";
     }
@@ -3457,11 +2595,11 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return builder.toString();
   }
 
-  static StringBuilder appendNonNull(StringBuilder builder, Object value) {
+  private StringBuilder appendNonNull(StringBuilder builder, Object value) {
     return appendNonNull(builder, value, false);
   }
 
-  static StringBuilder appendNonNull(StringBuilder builder, Object value, boolean firstColumn) {
+  private StringBuilder appendNonNull(StringBuilder builder, Object value, boolean firstColumn) {
     if (!firstColumn) {
       builder.append((char)separator);
     } else if (builder.length() > 0) {
@@ -4181,35 +3319,26 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   }
 
    /**
-   * Drop a given table or some partitions. DropTableDesc is currently used for both.
+   * Drop the given partitions.
    *
    * @param db
    *          The database in question.
-   * @param dropTbl
-   *          This is the table we're dropping.
+   * @param dropPartition
+   *          This describes the partitions we're dropping.
    * @throws HiveException
    *           Throws this exception if an unexpected error occurs.
    */
-  private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException {
+  private void dropPartitions(Hive db, DropPartitionDesc dropPartition) throws HiveException {
     // We need to fetch the table before it is dropped so that it can be passed to
     // post-execution hook
     Table tbl = null;
     try {
-      tbl = db.getTable(dropTbl.getTableName());
+      tbl = db.getTable(dropPartition.getTableName());
     } catch (InvalidTableException e) {
       // drop table is idempotent
     }
 
-    if (dropTbl.getPartSpecs() == null) {
-      dropTable(db, tbl, dropTbl);
-    } else {
-      dropPartitions(db, tbl, dropTbl);
-    }
-  }
-
-  private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException {
-
-    ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
+    ReplicationSpec replicationSpec = dropPartition.getReplicationSpec();
     if (replicationSpec.isInReplicationScope()){
       /**
        * ALTER TABLE DROP PARTITION ... FOR REPLICATION(x) behaves as a DROP PARTITION IF OLDER THAN x
@@ -4231,7 +3360,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         return;
       }
 
-      for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()){
+      for (DropPartitionDesc.PartSpec partSpec : dropPartition.getPartSpecs()){
         List<Partition> partitions = new ArrayList<>();
         try {
           db.getPartitionsByExpr(tbl, partSpec.getPartSpec(), conf, partitions);
@@ -4250,12 +3379,12 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
 
     // ifExists is currently verified in DDLSemanticAnalyzer
     List<Partition> droppedParts
-        = db.dropPartitions(dropTbl.getTableName(),
-                            dropTbl.getPartSpecs(),
+        = db.dropPartitions(dropPartition.getTableName(),
+                            dropPartition.getPartSpecs(),
                             PartitionDropOptions.instance()
                                                 .deleteData(true)
                                                 .ifExists(true)
-                                                .purgeData(dropTbl.getIfPurge()));
+                                                .purgeData(dropPartition.getIfPurge()));
     for (Partition partition : droppedParts) {
       console.printInfo("Dropped the partition " + partition.getName());
       // We have already locked the table, don't lock the partitions.
@@ -4263,102 +3392,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     }
   }
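For the partition-drop path, callers now build the renamed DropPartitionDesc directly and hand it to the task factory; a rough sketch of the construction pattern used by the LoadPartitions change further down in this patch (class and method names here are illustrative):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.exec.TaskFactory;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
    import org.apache.hadoop.hive.ql.plan.DDLWork;
    import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

    public final class DropPartitionTaskSketch {
      // Illustrative: mirrors the LoadPartitions change below -- a DropPartitionDesc
      // wrapping the partition expressions, handed to the task factory as DDLWork.
      static Task<?> build(Table table, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecsExpr,
          ReplicationSpec replicationSpec, HiveConf conf) {
        DropPartitionDesc desc = new DropPartitionDesc(
            table.getFullyQualifiedName(), partSpecsExpr, true, replicationSpec);
        return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc), conf);
      }
    }
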
 
-  private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException {
-    // This is a true DROP TABLE
-    if (tbl != null && dropTbl.getValidationRequired()) {
-      if (tbl.isView()) {
-        if (!dropTbl.getExpectView()) {
-          if (dropTbl.getIfExists()) {
-            return;
-          }
-          if (dropTbl.getExpectMaterializedView()) {
-            throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW");
-          } else {
-            throw new HiveException("Cannot drop a view with DROP TABLE");
-          }
-        }
-      } else if (tbl.isMaterializedView()) {
-        if (!dropTbl.getExpectMaterializedView()) {
-          if (dropTbl.getIfExists()) {
-            return;
-          }
-          if (dropTbl.getExpectView()) {
-            throw new HiveException("Cannot drop a materialized view with DROP VIEW");
-          } else {
-            throw new HiveException("Cannot drop a materialized view with DROP TABLE");
-          }
-        }
-      } else {
-        if (dropTbl.getExpectView()) {
-          if (dropTbl.getIfExists()) {
-            return;
-          }
-          throw new HiveException(
-              "Cannot drop a base table with DROP VIEW");
-        } else if (dropTbl.getExpectMaterializedView()) {
-          if (dropTbl.getIfExists()) {
-            return;
-          }
-          throw new HiveException(
-              "Cannot drop a base table with DROP MATERIALIZED VIEW");
-        }
-      }
-    }
-
-    ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
-    if ((tbl!= null) && replicationSpec.isInReplicationScope()){
-      /**
-       * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely
-       * matches a DROP TABLE IF OLDER THAN(x) semantic.
-       *
-       * Ideally, commands executed under the scope of replication need to be idempotent and resilient
-       * to repeats. What can happen, sometimes, is that a drone processing a replication task can
-       * have been abandoned for not returning in time, but still execute its task after a while,
-       * which should not result in it mucking up data that has been impressed later on. So, for example,
-       * if we create partition P1, followed by dropping it, followed by creating it yet again,
-       * the replication of that drop should not drop the newer partition if it runs after the destination
-       * object is already in the newer state.
-       *
-       * Thus, we check the replicationSpec.allowEventReplacementInto to determine whether or not we can
-       * drop the object in question(will return false if object is newer than the event, true if not)
-       *
-       * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP
-       * TABLE will always drop the table, and the included partitions, DROP TABLE FOR REPLICATION must
-       * do one more thing - if it does not drop the table because the table is in a newer state, it must
-       * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL
-       * acts like a recursive DROP TABLE IF OLDER.
-       */
-      if (!replicationSpec.allowEventReplacementInto(tbl.getParameters())){
-        // Drop occurred as part of replicating a drop, but the destination
-        // table was newer than the event being replicated. Ignore, but drop
-        // any partitions inside that are older.
-        if (tbl.isPartitioned()){
-
-          PartitionIterable partitions = new PartitionIterable(db,tbl,null,
-                  conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
-
-          for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())){
-            db.dropPartition(tbl.getDbName(),tbl.getTableName(),p.getValues(),true);
-          }
-        }
-        LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", dropTbl.getTableName());
-        return; // table is newer, leave it be.
-      }
-    }
-
-    // drop the table
-    // TODO: API w/catalog name
-    db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge());
-    if (tbl != null) {
-      // Remove from cache if it is a materialized view
-      if (tbl.isMaterializedView()) {
-        HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl);
-      }
-      // We have already locked the table in DDLSemanticAnalyzer, don't do it again here
-      addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
-    }
-  }
-
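The table-drop logic itself leaves DDLTask; what remains for callers is constructing the relocated descriptor and wrapping it in the new DDLWork2, as the LoadTable change further down in this patch does. A rough sketch of that pattern (the helper name is illustrative; the boolean flags are passed through exactly as in LoadTable):

    import java.util.HashSet;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.ddl.DDLWork2;
    import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.exec.TaskFactory;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

    public final class DropTableTaskSketch {
      // Illustrative: mirrors the LoadTable change below -- the relocated
      // DropTableDesc is wrapped in DDLWork2 instead of the old DDLWork.
      static Task<?> build(Table table, ReplicationSpec replicationSpec, HiveConf conf) {
        DropTableDesc desc = new DropTableDesc(
            table.getFullyQualifiedName(), table.getTableType(), true, false, replicationSpec);
        return TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), desc), conf);
      }
    }
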
   /**
    * Update last_modified_by and last_modified_time parameters in parameter map.
    *
@@ -4375,10 +3408,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return true;
   }
 
-  private void validateSerDe(String serdeName) throws HiveException {
-    validateSerDe(serdeName, conf);
-  }
-
   /**
    * Check if the given serde is valid.
    */
@@ -4396,257 +3425,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   }
 
   /**
-   * Create a new table.
-   *
-   * @param db
-   *          The database in question.
-   * @param crtTbl
-   *          This is the table we're creating.
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   */
-  private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
-    // create the table
-    Table tbl = crtTbl.toTable(conf);
-    List<SQLPrimaryKey> primaryKeys = crtTbl.getPrimaryKeys();
-    List<SQLForeignKey> foreignKeys = crtTbl.getForeignKeys();
-    List<SQLUniqueConstraint> uniqueConstraints = crtTbl.getUniqueConstraints();
-    List<SQLNotNullConstraint> notNullConstraints = crtTbl.getNotNullConstraints();
-    List<SQLDefaultConstraint> defaultConstraints = crtTbl.getDefaultConstraints();
-    List<SQLCheckConstraint> checkConstraints = crtTbl.getCheckConstraints();
-    LOG.debug("creating table {} on {}",tbl.getFullyQualifiedName(),tbl.getDataLocation());
-
-    if (crtTbl.getReplicationSpec().isInReplicationScope() && (!crtTbl.getReplaceMode())){
-      // if this is a replication spec, then replace-mode semantics might apply.
-      // if we're already asking for a table replacement, then we can skip this check.
-      // however, otherwise, if in replication scope, and we've not been explicitly asked
-      // to replace, we should check if the object we're looking at exists, and if so,
-      // trigger replace-mode semantics.
-      Table existingTable = db.getTable(tbl.getDbName(), tbl.getTableName(), false);
-      if (existingTable != null){
-        if (crtTbl.getReplicationSpec().allowEventReplacementInto(existingTable.getParameters())){
-          crtTbl.setReplaceMode(true); // we replace existing table.
-          ReplicationSpec.copyLastReplId(existingTable.getParameters(), tbl.getParameters());
-        } else {
-          LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update",
-                  crtTbl.getTableName());
-          return 0; // no replacement, the existing table state is newer than our update.
-        }
-      }
-    }
-
-    // create the table
-    if (crtTbl.getReplaceMode()) {
-      ReplicationSpec replicationSpec = crtTbl.getReplicationSpec();
-      long writeId = 0;
-      EnvironmentContext environmentContext = null;
-      if (replicationSpec != null && replicationSpec.isInReplicationScope()) {
-        if (replicationSpec.isMigratingToTxnTable()) {
-          // for migration we start the transaction and allocate write id in repl txn task for migration.
-          String writeIdPara = conf.get(ReplUtils.REPL_CURRENT_TBL_WRITE_ID);
-          if (writeIdPara == null) {
-            throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration");
-          }
-          writeId = Long.parseLong(writeIdPara);
-        } else {
-          writeId = crtTbl.getReplWriteId();
-        }
-
-        // In case of replication, statistics are obtained from the source, so do not update them
-        // on the replica. Since we are not replicating statistics for transactional tables, do not do
-        // so for transactional tables right now.
-        if (!AcidUtils.isTransactionalTable(crtTbl)) {
-          environmentContext = new EnvironmentContext();
-          environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
-        }
-      }
-
-      // replace-mode creates are really alters using CreateTableDesc.
-      db.alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, false,
-              environmentContext, true, writeId);
-    } else {
-      if ((foreignKeys != null && foreignKeys.size() > 0) ||
-          (primaryKeys != null && primaryKeys.size() > 0) ||
-          (uniqueConstraints != null && uniqueConstraints.size() > 0) ||
-          (notNullConstraints != null && notNullConstraints.size() > 0) ||
-          (checkConstraints!= null && checkConstraints.size() > 0) ||
-          defaultConstraints != null && defaultConstraints.size() > 0) {
-        db.createTable(tbl, crtTbl.getIfNotExists(), primaryKeys, foreignKeys,
-                uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
-      } else {
-        db.createTable(tbl, crtTbl.getIfNotExists());
-      }
-      Long mmWriteId = crtTbl.getInitialMmWriteId();
-      if (crtTbl.isCTAS() || mmWriteId != null) {
-        Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName());
-        if (crtTbl.isCTAS()) {
-          DataContainer dc = new DataContainer(createdTable.getTTable());
-          queryState.getLineageState().setLineage(
-                  createdTable.getPath(), dc, createdTable.getCols()
-          );
-        }
-      }
-    }
-    addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
-    return 0;
-  }
-
-
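The key reasoning in the removed createTable is the replication guard: a CREATE TABLE arriving via replication must not clobber a destination table that is already newer than the event, and when replacement is allowed the create is downgraded to an alter ("replace mode"). Distilled into a sketch using only the calls shown above (class and method names illustrative, surrounding wiring omitted):

    import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

    public final class ReplicatedCreateGuardSketch {
      // Illustrative: returns true when the create should proceed, false when the
      // destination table is newer than the replicated event and the create is a no-op.
      static boolean prepareReplicatedCreate(Hive db, CreateTableDesc desc, Table tbl)
          throws HiveException {
        Table existing = db.getTable(tbl.getDbName(), tbl.getTableName(), false);
        if (existing == null) {
          return true;
        }
        if (desc.getReplicationSpec().allowEventReplacementInto(existing.getParameters())) {
          desc.setReplaceMode(true); // replace-mode creates become alters downstream
          ReplicationSpec.copyLastReplId(existing.getParameters(), tbl.getParameters());
          return true;
        }
        return false; // existing table is newer than the event
      }
    }
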
-  /**
-   * Create a new table like an existing table.
-   *
-   * @param db
-   *          The database in question.
-   * @param crtTbl
-   *          This is the table we're creating.
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   */
-  private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws Exception {
-    // Get the existing table
-    Table oldtbl = db.getTable(crtTbl.getLikeTableName());
-    Table tbl;
-    if (oldtbl.getTableType() == TableType.VIRTUAL_VIEW ||
-        oldtbl.getTableType() == TableType.MATERIALIZED_VIEW) {
-      String targetTableName = crtTbl.getTableName();
-      tbl=db.newTable(targetTableName);
-
-      if (crtTbl.getTblProps() != null) {
-        tbl.getTTable().getParameters().putAll(crtTbl.getTblProps());
-      }
-
-      tbl.setTableType(TableType.MANAGED_TABLE);
-
-      if (crtTbl.isExternal()) {
-        tbl.setProperty("EXTERNAL", "TRUE");
-        tbl.setTableType(TableType.EXTERNAL_TABLE);
-        // partition discovery is on by default
-        tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true");
-      }
-
-      tbl.setFields(oldtbl.getCols());
-      tbl.setPartCols(oldtbl.getPartCols());
-
-      if (crtTbl.getDefaultSerName() == null) {
-        LOG.info("Default to LazySimpleSerDe for table {}", targetTableName);
-        tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-      } else {
-        // let's validate that the serde exists
-        validateSerDe(crtTbl.getDefaultSerName());
-        tbl.setSerializationLib(crtTbl.getDefaultSerName());
-      }
-
-      if (crtTbl.getDefaultSerdeProps() != null) {
-        Iterator<Entry<String, String>> iter = crtTbl.getDefaultSerdeProps().entrySet()
-            .iterator();
-        while (iter.hasNext()) {
-          Entry<String, String> m = iter.next();
-          tbl.setSerdeParam(m.getKey(), m.getValue());
-        }
-      }
-
-      tbl.setInputFormatClass(crtTbl.getDefaultInputFormat());
-      tbl.setOutputFormatClass(crtTbl.getDefaultOutputFormat());
-
-      tbl.getTTable().getSd().setInputFormat(
-          tbl.getInputFormatClass().getName());
-      tbl.getTTable().getSd().setOutputFormat(
-          tbl.getOutputFormatClass().getName());
-    } else {
-      tbl=oldtbl;
-
-      // find out database name and table name of target table
-      String targetTableName = crtTbl.getTableName();
-      String[] names = Utilities.getDbTableName(targetTableName);
-
-      tbl.setDbName(names[0]);
-      tbl.setTableName(names[1]);
-
-      // using old table object, hence reset the owner to current user for new table.
-      tbl.setOwner(SessionState.getUserFromAuthenticator());
-
-      if (crtTbl.getLocation() != null) {
-        tbl.setDataLocation(new Path(crtTbl.getLocation()));
-      } else {
-        tbl.unsetDataLocation();
-      }
-
-      Class<? extends Deserializer> serdeClass = oldtbl.getDeserializerClass();
-
-      Map<String, String> params = tbl.getParameters();
-      // We should copy only those table parameters that are specified in the config.
-      SerDeSpec spec = AnnotationUtils.getAnnotation(serdeClass, SerDeSpec.class);
-      String paramsStr = HiveConf.getVar(conf, HiveConf.ConfVars.DDL_CTL_PARAMETERS_WHITELIST);
-
-      Set<String> retainer = new HashSet<String>();
-      // for non-native table, property storage_handler should be retained
-      retainer.add(META_TABLE_STORAGE);
-      if (spec != null && spec.schemaProps() != null) {
-        retainer.addAll(Arrays.asList(spec.schemaProps()));
-      }
-      if (paramsStr != null) {
-        retainer.addAll(Arrays.asList(paramsStr.split(",")));
-      }
-      if (!retainer.isEmpty()) {
-        params.keySet().retainAll(retainer);
-      } else {
-        params.clear();
-      }
-
-      if (crtTbl.getTblProps() != null) {
-        params.putAll(crtTbl.getTblProps());
-      }
-
-      if (crtTbl.isUserStorageFormat()) {
-        tbl.setInputFormatClass(crtTbl.getDefaultInputFormat());
-        tbl.setOutputFormatClass(crtTbl.getDefaultOutputFormat());
-        tbl.getTTable().getSd().setInputFormat(
-        tbl.getInputFormatClass().getName());
-        tbl.getTTable().getSd().setOutputFormat(
-        tbl.getOutputFormatClass().getName());
-        if (crtTbl.getDefaultSerName() == null) {
-          LOG.info("Default to LazySimpleSerDe for like table {}", targetTableName);
-          tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-        } else {
-          // let's validate that the serde exists
-          validateSerDe(crtTbl.getDefaultSerName());
-          tbl.setSerializationLib(crtTbl.getDefaultSerName());
-        }
-      }
-
-      tbl.getTTable().setTemporary(crtTbl.isTemporary());
-      tbl.getTTable().unsetId();
-
-      if (crtTbl.isExternal()) {
-        tbl.setProperty("EXTERNAL", "TRUE");
-        tbl.setTableType(TableType.EXTERNAL_TABLE);
-        // partition discovery is on by default
-        tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true");
-      } else {
-        tbl.getParameters().remove("EXTERNAL");
-      }
-    }
-
-    // If location is specified - ensure that it is a full qualified name
-    if (DDLTask.doesTableNeedLocation(tbl)) {
-      makeLocationQualified(tbl.getDbName(), tbl, conf);
-    }
-
-    if (crtTbl.getLocation() == null && !tbl.isPartitioned()
-        && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-      StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(),
-          MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE);
-    }
-
-    // create the table
-    db.createTable(tbl, crtTbl.getIfNotExists());
-    addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
-    return 0;
-  }
-
-  /**
    * Create a new view.
    *
    * @param db
@@ -4725,50 +3503,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
-  private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException {
-    if (truncateTableDesc.getColumnIndexes() != null) {
-      ColumnTruncateWork truncateWork = new ColumnTruncateWork(
-          truncateTableDesc.getColumnIndexes(), truncateTableDesc.getInputDir(),
-          truncateTableDesc.getOutputDir());
-      truncateWork.setListBucketingCtx(truncateTableDesc.getLbCtx());
-      truncateWork.setMapperCannotSpanPartns(true);
-      DriverContext driverCxt = new DriverContext();
-      ColumnTruncateTask taskExec = new ColumnTruncateTask();
-      taskExec.initialize(queryState, null, driverCxt, null);
-      taskExec.setWork(truncateWork);
-      taskExec.setQueryPlan(this.getQueryPlan());
-      subtask = taskExec;
-      int ret = taskExec.execute(driverCxt);
-      if (subtask.getException() != null) {
-        setException(subtask.getException());
-      }
-      return ret;
-    }
-
-    String tableName = truncateTableDesc.getTableName();
-    Map<String, String> partSpec = truncateTableDesc.getPartSpec();
-
-    ReplicationSpec replicationSpec = truncateTableDesc.getReplicationSpec();
-    if (!allowOperationInReplicationScope(db, tableName, partSpec, replicationSpec)) {
-      // no truncate, the table is missing either due to drop/rename which follows the truncate.
-      // or the existing table is newer than our update.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("DDLTask: Truncate Table/Partition is skipped as table {} / partition {} is newer than update",
-          tableName,
-          (partSpec == null) ? "null" : FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values())));
-      }
-      return 0;
-    }
-
-    try {
-      db.truncateTable(tableName, partSpec,
-              replicationSpec != null && replicationSpec.isInReplicationScope() ? truncateTableDesc.getWriteId() : 0L);
-    } catch (Exception e) {
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
-    }
-    return 0;
-  }
-
   private int exchangeTablePartition(Hive db,
       AlterTableExchangePartition exchangePartition) throws HiveException {
     Map<String, String> partitionSpecs = exchangePartition.getPartitionSpecs();
@@ -4803,32 +3537,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return "DDL";
   }
 
-   /**
-   * Make location in specified sd qualified.
-   *
-   * @param databaseName
-   *          Database name.
-   */
-  public static void makeLocationQualified(String databaseName, Table table, HiveConf conf) throws HiveException {
-    Path path = null;
-    StorageDescriptor sd = table.getTTable().getSd();
-    // If the table's location is currently unset, it is left unset, allowing the metastore to
-    // fill in the table's location.
-    // Note that the previous logic for some reason would make a special case if the DB was the
-    // default database, and actually attempt to generate a location.
-    // This seems incorrect and unnecessary, since the metastore is just as able to fill in the
-    // default table location in the case of the default DB, as it is for non-default DBs.
-    if (sd.isSetLocation())
-    {
-      path = new Path(sd.getLocation());
-    }
-
-    if (path != null)
-    {
-      sd.setLocation(Utilities.getQualifiedPath(conf, path));
-    }
-  }
-
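The removed helper's only real work is qualifying a location that is actually set against the default filesystem; an unset location is deliberately left for the metastore to fill in. A minimal sketch of that qualification step (names illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public final class QualifyLocationSketch {
      // Illustrative: only a location that is already set gets rewritten to a
      // fully qualified path; otherwise the metastore chooses the default location.
      static void qualifyLocation(StorageDescriptor sd, HiveConf conf) throws HiveException {
        if (sd.isSetLocation()) {
          sd.setLocation(Utilities.getQualifiedPath(conf, new Path(sd.getLocation())));
        }
      }
    }
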
   /**
    * Validate if the given table/partition is eligible for update
    *
@@ -4868,21 +3576,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return false;
   }
 
-  public static boolean doesTableNeedLocation(Table tbl) {
-    // TODO: If we are ok with breaking compatibility of existing 3rd party StorageHandlers,
-    // this method could be moved to the HiveStorageHandler interface.
-    boolean retval = true;
-    if (tbl.getStorageHandler() != null) {
-      // TODO: why doesn't this check class name rather than toString?
-      String sh = tbl.getStorageHandler().toString();
-      retval = !sh.equals("org.apache.hadoop.hive.hbase.HBaseStorageHandler")
-              && !sh.equals(Constants.DRUID_HIVE_STORAGE_HANDLER_ID)
-              && !sh.equals(Constants.JDBC_HIVE_STORAGE_HANDLER_ID)
-              && !sh.equals("org.apache.hadoop.hive.accumulo.AccumuloStorageHandler");
-    }
-    return retval;
-  }
-
   private int remFirstIncPendFlag(Hive hive, ReplRemoveFirstIncLoadPendFlagDesc desc) throws HiveException, TException {
     String dbNameOrPattern = desc.getDatabaseName();
     String tableNameOrPattern = desc.getTableName();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
index 3308797..956c4ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
@@ -617,7 +617,7 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
     return exception;
   }
 
-  protected void setException(Throwable ex) {
+  public void setException(Throwable ex) {
     exception = ex;
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index c1773c9..0add38b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.ImportTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
@@ -349,8 +349,8 @@ public class LoadPartitions {
     Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecsExpr =
             ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec));
     if (partSpecsExpr.size() > 0) {
-      DropTableDesc dropPtnDesc = new DropTableDesc(table.getFullyQualifiedName(),
-              partSpecsExpr, null, true, event.replicationSpec());
+      DropPartitionDesc dropPtnDesc = new DropPartitionDesc(table.getFullyQualifiedName(), partSpecsExpr, true,
+          event.replicationSpec());
       dropPtnTask = TaskFactory.get(
               new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf
       );
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
index 3b0b67a..b335f19 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -45,8 +47,6 @@ import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 import org.apache.hadoop.hive.ql.plan.ImportTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
@@ -325,6 +325,6 @@ public class LoadTable {
     assert(table != null);
     DropTableDesc dropTblDesc = new DropTableDesc(table.getFullyQualifiedName(), table.getTableType(),
             true, false, event.replicationSpec());
-    return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf);
+    return TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 3961baa..50a233d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hive.metastore.api.LockType;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.Entity;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
@@ -68,7 +69,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.HadoopShims;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 4aea872..800d80a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -41,13 +41,13 @@ import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc;
+import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
-import org.apache.hadoop.hive.ql.plan.LockTableDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.apache.thrift.TException;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
index 43dba73..bb46bf9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
@@ -26,11 +26,11 @@ import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.Driver.LockedDriverState;
 import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc;
+import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.LockTableDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 
 import java.util.List;
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
index 0abec56..0e148ed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.Driver.LockedDriverState;
 import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc;
+import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -38,8 +40,6 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.LockTableDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 
 /**
  * An implementation HiveTxnManager that includes internal methods that all
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 17576ff..33d157d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -127,7 +127,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
@@ -3363,7 +3363,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   public List<Partition> dropPartitions(Table table, List<String>partDirNames,
       boolean deleteData, boolean ifExists) throws HiveException {
     // partitions to be dropped in this batch
-    List<DropTableDesc.PartSpec> partSpecs = new ArrayList<>(partDirNames.size());
+    List<DropPartitionDesc.PartSpec> partSpecs = new ArrayList<>(partDirNames.size());
 
     // parts of the partition
     String[] parts = null;
@@ -3413,7 +3413,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       }
 
       // Add the expression to partition specification
-      partSpecs.add(new DropTableDesc.PartSpec(expr, partSpecKey));
+      partSpecs.add(new DropPartitionDesc.PartSpec(expr, partSpecKey));
 
       // Increment dropKey to get a new key for hash map
       ++partSpecKey;
@@ -3423,14 +3423,14 @@ private void constructOneLBLocationMap(FileStatus fSta,
     return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists);
   }
 
-  public List<Partition> dropPartitions(String tblName, List<DropTableDesc.PartSpec> partSpecs,
+  public List<Partition> dropPartitions(String tblName, List<DropPartitionDesc.PartSpec> partSpecs,
       boolean deleteData, boolean ifExists) throws HiveException {
     String[] names = Utilities.getDbTableName(tblName);
     return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists);
   }
 
   public List<Partition> dropPartitions(String dbName, String tblName,
-      List<DropTableDesc.PartSpec> partSpecs,  boolean deleteData,
+      List<DropPartitionDesc.PartSpec> partSpecs,  boolean deleteData,
       boolean ifExists) throws HiveException {
     return dropPartitions(dbName, tblName, partSpecs,
                           PartitionDropOptions.instance()
@@ -3438,19 +3438,19 @@ private void constructOneLBLocationMap(FileStatus fSta,
                                               .ifExists(ifExists));
   }
 
-  public List<Partition> dropPartitions(String tblName, List<DropTableDesc.PartSpec> partSpecs,
+  public List<Partition> dropPartitions(String tblName, List<DropPartitionDesc.PartSpec> partSpecs,
                                         PartitionDropOptions dropOptions) throws HiveException {
     String[] names = Utilities.getDbTableName(tblName);
     return dropPartitions(names[0], names[1], partSpecs, dropOptions);
   }
 
   public List<Partition> dropPartitions(String dbName, String tblName,
-      List<DropTableDesc.PartSpec> partSpecs, PartitionDropOptions dropOptions) throws HiveException {
+      List<DropPartitionDesc.PartSpec> partSpecs, PartitionDropOptions dropOptions) throws HiveException {
     try {
       Table tbl = getTable(dbName, tblName);
       List<org.apache.hadoop.hive.metastore.utils.ObjectPair<Integer, byte[]>> partExprs =
           new ArrayList<>(partSpecs.size());
-      for (DropTableDesc.PartSpec partSpec : partSpecs) {
+      for (DropPartitionDesc.PartSpec partSpec : partSpecs) {
         partExprs.add(new org.apache.hadoop.hive.metastore.utils.ObjectPair<>(partSpec.getPrefixLength(),
             SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec())));
       }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
index a3ae886..f28d68f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
@@ -490,7 +491,7 @@ public class JsonMetaDataFormatter implements MetaDataFormatter {
    */
   @Override
   public void showDatabaseDescription(DataOutputStream out, String database, String comment,
-      String location, String ownerName, String ownerType, Map<String, String> params)
+      String location, String ownerName, PrincipalType ownerType, Map<String, String> params)
           throws HiveException {
     MapBuilder builder = MapBuilder.create().put("database", database).put("comment", comment)
         .put("location", location);
@@ -498,7 +499,7 @@ public class JsonMetaDataFormatter implements MetaDataFormatter {
       builder.put("owner", ownerName);
     }
     if (null != ownerType) {
-      builder.put("ownerType", ownerType);
+      builder.put("ownerType", ownerType.name());
     }
     if (null != params && !params.isEmpty()) {
       builder.put("params", params);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
index 4180dc4..c9dd854 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.metastore.api.WMPool;
 import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc;
 import org.apache.hadoop.hive.ql.metadata.CheckConstraint;
 import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
@@ -53,7 +54,6 @@ import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.UniqueConstraint;
 import org.apache.hadoop.hive.ql.metadata.UniqueConstraint.UniqueConstraintCol;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo.ForeignKeyCol;
-import org.apache.hadoop.hive.ql.plan.DescTableDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.serde2.io.DateWritableV2;
 import org.apache.hive.common.util.HiveStringUtils;
@@ -764,7 +764,7 @@ public final class MetaDataFormatUtils {
     if ("json".equals(conf.get(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text"))) {
       return new JsonMetaDataFormatter();
     } else {
-      return new TextMetaDataFormatter(conf.getIntVar(HiveConf.ConfVars.CLIPRETTYOUTPUTNUMCOLS), conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY));
+      return new TextMetaDataFormatter(conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY));
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
index 80e3d8b..b7e5ebe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
@@ -20,15 +20,14 @@ package org.apache.hadoop.hive.ql.metadata.formatting;
 
 import java.io.DataOutputStream;
 import java.io.OutputStream;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import javax.annotation.Nullable;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
@@ -136,9 +135,9 @@ public interface MetaDataFormatter {
   /**
    * Describe a database.
    */
-  public void showDatabaseDescription (DataOutputStream out, String database, String comment,
-      String location, String ownerName, String ownerType, Map<String, String> params)
-          throws HiveException;
+  void showDatabaseDescription(DataOutputStream out, String database, String comment, String location,
+      String ownerName, PrincipalType ownerType, Map<String, String> params)
+      throws HiveException;
 
   void showResourcePlans(DataOutputStream out, List<WMResourcePlan> resourcePlans)
       throws HiveException;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index fbeb9c8..f7704bd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -29,10 +29,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo;
-import org.apache.hadoop.hive.ql.plan.DescTableDesc;
 import org.apache.hive.common.util.HiveStringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -45,6 +43,7 @@ import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
@@ -73,14 +72,9 @@ class TextMetaDataFormatter implements MetaDataFormatter {
   private static final int separator = Utilities.tabCode;
   private static final int terminator = Utilities.newLineCode;
 
-  /** The number of columns to be used in pretty formatting metadata output.
-   * If -1, then the current terminal width is auto-detected and used.
-   */
-  private final int prettyOutputNumCols;
   private final boolean showPartColsSeparately;
 
-  public TextMetaDataFormatter(int prettyOutputNumCols, boolean partColsSeparately) {
-    this.prettyOutputNumCols = prettyOutputNumCols;
+  public TextMetaDataFormatter(boolean partColsSeparately) {
     this.showPartColsSeparately = partColsSeparately;
   }
 
@@ -629,7 +623,7 @@ class TextMetaDataFormatter implements MetaDataFormatter {
    */
   @Override
   public void showDatabaseDescription(DataOutputStream outStream, String database, String comment,
-      String location, String ownerName, String ownerType, Map<String, String> params)
+      String location, String ownerName, PrincipalType ownerType, Map<String, String> params)
           throws HiveException {
     try {
       outStream.write(database.getBytes("UTF-8"));
@@ -647,7 +641,7 @@ class TextMetaDataFormatter implements MetaDataFormatter {
       }
       outStream.write(separator);
       if (ownerType != null) {
-        outStream.write(ownerType.getBytes("UTF-8"));
+        outStream.write(ownerType.name().getBytes("UTF-8"));
       }
       outStream.write(separator);
       if (params != null && !params.isEmpty()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
index cf54aa3..3f5b0e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.optimizer;
 
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
@@ -100,22 +101,23 @@ public class QueryPlanPostProcessor {
         new QueryPlanPostProcessor(((ExplainWork)work).getRootTasks(), acidSinks, executionId);
       }
       else if(work instanceof ReplLoadWork ||
-        work instanceof ReplStateLogWork ||
-        work instanceof GenTezWork ||
-        work instanceof GenSparkWork ||
-        work instanceof ArchiveWork ||
-        work instanceof ColumnStatsUpdateWork ||
-        work instanceof BasicStatsWork ||
-        work instanceof ConditionalWork ||
-        work instanceof CopyWork ||
-        work instanceof DDLWork ||
-        work instanceof DependencyCollectionWork ||
-        work instanceof ExplainSQRewriteWork ||
-        work instanceof FetchWork ||
-        work instanceof FunctionWork ||
-        work instanceof MoveWork ||
-        work instanceof BasicStatsNoJobWork ||
-        work instanceof StatsWork) {
+          work instanceof ReplStateLogWork ||
+          work instanceof GenTezWork ||
+          work instanceof GenSparkWork ||
+          work instanceof ArchiveWork ||
+          work instanceof ColumnStatsUpdateWork ||
+          work instanceof BasicStatsWork ||
+          work instanceof ConditionalWork ||
+          work instanceof CopyWork ||
+          work instanceof DDLWork ||
+          work instanceof DDLWork2 ||
+          work instanceof DependencyCollectionWork ||
+          work instanceof ExplainSQRewriteWork ||
+          work instanceof FetchWork ||
+          work instanceof FunctionWork ||
+          work instanceof MoveWork ||
+          work instanceof BasicStatsNoJobWork ||
+          work instanceof StatsWork) {
         LOG.debug("Found " + work.getClass().getName() + " - no FileSinkOperation can be present.  executionId=" + executionId);
       }
       else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
index 4b2958a..960dd34 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
@@ -36,7 +36,10 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc;
+import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -47,9 +50,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
-import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 import org.apache.hadoop.hive.ql.plan.ExportWork;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
@@ -151,7 +152,7 @@ public class AcidExportSemanticAnalyzer extends RewriteSemanticAnalyzer {
     try {
       ReadEntity dbForTmpTable = new ReadEntity(db.getDatabase(exportTable.getDbName()));
       inputs.add(dbForTmpTable); //so the plan knows we are 'reading' this db - locks, security...
-      DDLTask createTableTask = (DDLTask) TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), ctlt), conf);
+      DDLTask2 createTableTask = (DDLTask2) TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), ctlt), conf);
       createTableTask.setConf(conf); //above get() doesn't set it
       createTableTask.execute(new DriverContext(new Context(conf)));
       newTable = db.getTable(newTableName);
@@ -199,7 +200,7 @@ public class AcidExportSemanticAnalyzer extends RewriteSemanticAnalyzer {
     // {@link DDLSemanticAnalyzer#analyzeDropTable(ASTNode ast, TableType expectedType)
     ReplicationSpec replicationSpec = new ReplicationSpec();
     DropTableDesc dropTblDesc = new DropTableDesc(newTableName, TableType.MANAGED_TABLE, false, true, replicationSpec);
-    Task<DDLWork> dropTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), conf);
+    Task<DDLWork2> dropTask = TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), dropTblDesc), conf);
     exportTask.addDependentTask(dropTask);
     markReadEntityForUpdate();
     if (ctx.isExplainPlan()) {
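
Taken together, the temporary-table lifecycle in this analyzer now runs through DDLWork2 end to end. A condensed sketch of that pattern, assuming ctlt, newTableName and exportTask stand for the values the surrounding method has already computed:

import java.util.HashSet;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.ddl.DDLTask2;
import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc;
import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

class AcidExportTempTableSketch {
  static void createAndScheduleCleanup(HiveConf conf, CreateTableLikeDesc ctlt, String newTableName,
      Task<?> exportTask) throws Exception {
    // Create the temporary flat table immediately, through the new task type.
    DDLTask2 createTableTask =
        (DDLTask2) TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), ctlt), conf);
    createTableTask.setConf(conf);                    // TaskFactory.get() does not set it
    createTableTask.execute(new DriverContext(new Context(conf)));

    // Drop the temporary table again once the export has run.
    DropTableDesc dropTblDesc =
        new DropTableDesc(newTableName, TableType.MANAGED_TABLE, false, true, new ReplicationSpec());
    Task<DDLWork2> dropTask =
        TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), dropTblDesc), conf);
    exportTask.addDependentTask(dropTask);
  }
}
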
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 4a542ae..baf6356 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -76,9 +76,19 @@ import org.apache.hadoop.hive.ql.ddl.database.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.DescDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc;
+import org.apache.hadoop.hive.ql.ddl.database.ShowCreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.ShowDatabasesDesc;
 import org.apache.hadoop.hive.ql.ddl.database.SwitchDatabaseDesc;
 import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc;
+import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.ShowCreateTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.ShowTableStatusDesc;
+import org.apache.hadoop.hive.ql.ddl.table.ShowTablesDesc;
+import org.apache.hadoop.hive.ql.ddl.table.ShowTablePropertiesDesc;
+import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
@@ -131,9 +141,8 @@ import org.apache.hadoop.hive.ql.plan.DDLDesc;
 import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
-import org.apache.hadoop.hive.ql.plan.DescTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc;
 import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc;
 import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc;
@@ -145,7 +154,6 @@ import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.plan.KillQueryDesc;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
-import org.apache.hadoop.hive.ql.plan.LockTableDesc;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.MsckDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
@@ -155,21 +163,14 @@ import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
 import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowConfDesc;
-import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
 import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
 import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc;
-import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
-import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
-import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc;
 import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -1445,8 +1446,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
     boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
     DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectedType, ifExists, ifPurge, replicationSpec);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        dropTblDesc)));
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), dropTblDesc)));
   }
 
   private void analyzeTruncateTable(ASTNode ast) throws SemanticException {
@@ -1493,7 +1493,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       setAcidDdlDesc(truncateTblDesc);
     }
 
-    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
+    DDLWork2 ddlWork = new DDLWork2(getInputs(), getOutputs(), truncateTblDesc);
     Task<?> truncateTask = TaskFactory.get(ddlWork);
 
     // Is this a truncate column command
@@ -2518,26 +2518,26 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       validateTable(tableName, partSpec);
     }
 
-    DescTableDesc descTblDesc = new DescTableDesc(
-      ctx.getResFile(), tableName, partSpec, colPath);
-
     boolean showColStats = false;
+    boolean isFormatted = false;
+    boolean isExt = false;
     if (ast.getChildCount() == 2) {
       int descOptions = ast.getChild(1).getType();
-      descTblDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED);
-      descTblDesc.setExt(descOptions == HiveParser.KW_EXTENDED);
+      isFormatted = descOptions == HiveParser.KW_FORMATTED;
+      isExt = descOptions == HiveParser.KW_EXTENDED;
       // in case of "DESCRIBE FORMATTED tablename column_name" statement, colPath
       // will contain tablename.column_name. If column_name is not specified
       // colPath will be equal to tableName. This is how we can differentiate
       // if we are describing a table or column
-      if (!colPath.equalsIgnoreCase(tableName) && descTblDesc.isFormatted()) {
+      if (!colPath.equalsIgnoreCase(tableName) && isFormatted) {
         showColStats = true;
       }
     }
 
     inputs.add(new ReadEntity(getTable(tableName)));
-    Task ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        descTblDesc));
+
+    DescTableDesc descTblDesc = new DescTableDesc(ctx.getResFile(), tableName, partSpec, colPath, isExt, isFormatted);
+    Task<?> ddlTask = TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), descTblDesc));
     rootTasks.add(ddlTask);
     String schema = DescTableDesc.getSchema(showColStats);
     setFetchTask(createFetchTask(schema));
@@ -2620,14 +2620,12 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
   private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException {
     String dbName = getUnescapedName((ASTNode)ast.getChild(0));
-    ShowCreateDatabaseDesc showCreateDbDesc =
-        new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString());
+    ShowCreateDatabaseDesc showCreateDbDesc = new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString());
 
     Database database = getDatabase(dbName);
     inputs.add(new ReadEntity(database));
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        showCreateDbDesc)));
-    setFetchTask(createFetchTask(showCreateDbDesc.getSchema()));
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showCreateDbDesc)));
+    setFetchTask(createFetchTask(ShowCreateDatabaseDesc.SCHEMA));
   }
 
 
@@ -2638,9 +2636,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
     Table tab = getTable(tableName);
     inputs.add(new ReadEntity(tab));
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        showCreateTblDesc)));
-    setFetchTask(createFetchTask(showCreateTblDesc.getSchema()));
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showCreateTblDesc)));
+    setFetchTask(createFetchTask(ShowCreateTableDesc.SCHEMA));
   }
 
   private void analyzeShowDatabases(ASTNode ast) throws SemanticException {
@@ -2686,8 +2683,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
     showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames, tableTypeFilter, isExtended);
     inputs.add(new ReadEntity(getDatabase(dbName)));
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        showTblsDesc)));
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showTblsDesc)));
     setFetchTask(createFetchTask(showTblsDesc.getSchema()));
   }
 
@@ -2763,15 +2759,13 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       validateTable(tableNames, partSpec);
     }
 
-    showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName,
-        tableNames, partSpec);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        showTblStatusDesc)));
-    setFetchTask(createFetchTask(showTblStatusDesc.getSchema()));
+    showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName, tableNames, partSpec);
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showTblStatusDesc)));
+    setFetchTask(createFetchTask(ShowTableStatusDesc.SCHEMA));
   }
 
   private void analyzeShowTableProperties(ASTNode ast) throws SemanticException {
-    ShowTblPropertiesDesc showTblPropertiesDesc;
+    ShowTablePropertiesDesc showTblPropertiesDesc;
     String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     String propertyName = null;
     if (ast.getChildCount() > 1) {
@@ -2781,11 +2775,9 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     String tableNames = getDotName(qualified);
     validateTable(tableNames, null);
 
-    showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tableNames,
-        propertyName);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        showTblPropertiesDesc)));
-    setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema()));
+    showTblPropertiesDesc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableNames, propertyName);
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showTblPropertiesDesc)));
+    setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA));
   }
 
   /**
@@ -2920,8 +2912,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
       dbName = unescapeIdentifier(ast.getChild(1).getText());
       validateDatabase(dbName);
-      showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName);
-      showViewsDesc.setType(TableType.VIRTUAL_VIEW);
+      showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.VIRTUAL_VIEW);
       break;
     case 3: // Uses a pattern and specifies a DB
       assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
@@ -2931,13 +2922,11 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, viewNames, TableType.VIRTUAL_VIEW);
       break;
     default: // No pattern or DB
-      showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName);
-      showViewsDesc.setType(TableType.VIRTUAL_VIEW);
+      showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.VIRTUAL_VIEW);
       break;
     }
 
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        showViewsDesc)));
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showViewsDesc)));
     setFetchTask(createFetchTask(showViewsDesc.getSchema()));
   }
 
@@ -2960,8 +2949,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
       dbName = unescapeIdentifier(ast.getChild(1).getText());
       validateDatabase(dbName);
-      showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName);
-      showMaterializedViewsDesc.setType(TableType.MATERIALIZED_VIEW);
+      showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.MATERIALIZED_VIEW);
       break;
     case 3: // Uses a pattern and specifies a DB
       assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
@@ -2972,13 +2960,11 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
           ctx.getResFile(), dbName, materializedViewNames, TableType.MATERIALIZED_VIEW);
       break;
     default: // No pattern or DB
-      showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName);
-      showMaterializedViewsDesc.setType(TableType.MATERIALIZED_VIEW);
+      showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.MATERIALIZED_VIEW);
       break;
     }
 
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        showMaterializedViewsDesc)));
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showMaterializedViewsDesc)));
     setFetchTask(createFetchTask(showMaterializedViewsDesc.getSchema()));
   }
 
@@ -3005,10 +2991,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     LockTableDesc lockTblDesc = new LockTableDesc(tableName, mode, partSpec,
-        HiveConf.getVar(conf, ConfVars.HIVEQUERYID));
-    lockTblDesc.setQueryStr(this.ctx.getCmd());
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        lockTblDesc)));
+        HiveConf.getVar(conf, ConfVars.HIVEQUERYID), ctx.getCmd());
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), lockTblDesc)));
 
     // Need to initialize the lock manager
     ctx.setNeedLockMgr(true);
@@ -3107,8 +3091,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     UnlockTableDesc unlockTblDesc = new UnlockTableDesc(tableName, partSpec);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        unlockTblDesc)));
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), unlockTblDesc)));
 
     // Need to initialize the lock manager
     ctx.setNeedLockMgr(true);
@@ -3438,9 +3421,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
     addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists);
 
-    DropTableDesc dropTblDesc =
-        new DropTableDesc(getDotName(qualified), partSpecs, expectView ? TableType.VIRTUAL_VIEW : null,
-                mustPurge, replicationSpec);
+    DropPartitionDesc dropTblDesc =
+        new DropPartitionDesc(getDotName(qualified), partSpecs, mustPurge, replicationSpec);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc)));
   }
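
The converted statements in this file all follow the same shape: build the new desc, wrap it in a DDLWork2, and read the result schema from a static constant on the desc rather than an instance getter. A minimal sketch of that shape for SHOW CREATE DATABASE; the class and method names below are illustrative, only the desc, work and factory calls come from the hunks above:

import java.util.HashSet;

import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.database.ShowCreateDatabaseDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

class ShowCreateDatabaseTaskSketch {
  // resFile is the path the analyzer obtains from ctx.getResFile().
  static Task<?> build(String dbName, String resFile,
      HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) {
    ShowCreateDatabaseDesc desc = new ShowCreateDatabaseDesc(dbName, resFile);
    // The fetch task created afterwards reads ShowCreateDatabaseDesc.SCHEMA,
    // which replaces the old instance-level getSchema().
    return TaskFactory.get(new DDLWork2(inputs, outputs, desc));
  }
}
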
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index b6b4f58..cb9584c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -58,7 +60,6 @@ import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
 import org.apache.hadoop.hive.ql.plan.ImportTableDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
@@ -565,7 +566,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
                                        ReplicationSpec replicationSpec) {
     DropTableDesc dropTblDesc = new DropTableDesc(table.getTableName(), table.getTableType(),
             true, false, replicationSpec);
-    return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf());
+    return TaskFactory.get(new DDLWork2(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf());
   }
 
   private static Task<? extends Serializable> alterTableTask(ImportTableDesc tableDesc,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 7b30b59..77e1818 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.QueryProperties;
 import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
@@ -43,7 +44,6 @@ import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
index a2f6fbb..0405ee8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
@@ -30,8 +30,8 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 
 /**
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 05257c9..18dbbb2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -98,6 +98,10 @@ import org.apache.hadoop.hive.ql.QueryProperties;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.cache.results.CacheUsage;
 import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc;
+import org.apache.hadoop.hive.ql.ddl.table.PreInsertTableDesc;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -190,8 +194,6 @@ import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowType;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
-import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
@@ -8079,7 +8081,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   private void createPreInsertDesc(Table table, boolean overwrite) {
     PreInsertTableDesc preInsertTableDesc = new PreInsertTableDesc(table, overwrite);
     this.rootTasks
-        .add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), preInsertTableDesc)));
+        .add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), preInsertTableDesc)));
 
   }
 
@@ -12488,10 +12490,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     if (optionalTezTask.isPresent()) {
       final TezTask tezTask = optionalTezTask.get();
       rootTasks.stream()
-          .filter(task -> task.getWork() instanceof DDLWork)
-          .map(task -> (DDLWork) task.getWork())
-          .filter(ddlWork -> ddlWork.getPreInsertTableDesc() != null)
-          .map(ddlWork -> ddlWork.getPreInsertTableDesc())
+          .filter(task -> task.getWork() instanceof DDLWork2)
+          .map(task -> (DDLWork2) task.getWork())
+          .filter(ddlWork -> ddlWork.getDDLDesc() != null)
+          .map(ddlWork -> (PreInsertTableDesc)ddlWork.getDDLDesc())
           .map(ddlPreInsertTask -> new InsertCommitHookDesc(ddlPreInsertTask.getTable(),
               ddlPreInsertTask.isOverwrite()))
           .forEach(insertCommitHookDesc -> tezTask.addDependentTask(
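
With the dedicated getter gone, the pre-insert descriptor is now reached through the generic desc carried by DDLWork2. A small sketch of that lookup; the instanceof filter is slightly more defensive than the direct cast in the hunk above:

import java.io.Serializable;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.PreInsertTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;

class PreInsertLookupSketch {
  // Collects the pre-insert descriptors that the Tez commit-hook wiring consumes.
  static List<PreInsertTableDesc> preInserts(List<Task<? extends Serializable>> rootTasks) {
    return rootTasks.stream()
        .filter(task -> task.getWork() instanceof DDLWork2)
        .map(task -> (DDLWork2) task.getWork())
        .map(DDLWork2::getDDLDesc)
        .filter(desc -> desc instanceof PreInsertTableDesc)
        .map(desc -> (PreInsertTableDesc) desc)
        .collect(Collectors.toList());
  }
}
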
@@ -13434,8 +13436,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       crtTblDesc.validate(conf);
       // outputs is empty, which means this create table happens in the current
       // database.
-      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-            crtTblDesc)));
+      rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTblDesc)));
       break;
     case ctt: // CREATE TRANSACTIONAL TABLE
       if (isExt) {
@@ -13459,7 +13460,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       crtTranTblDesc.validate(conf);
       // outputs is empty, which means this create table happens in the current
       // database.
-      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTranTblDesc)));
+      rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTranTblDesc)));
       break;
 
     case CTLT: // create table like <tbl_name>
@@ -13478,8 +13479,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location,
           storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists,
           likeTableName, isUserStorageFormat);
-      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-            crtTblLikeDesc)));
+      rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTblLikeDesc)));
       break;
 
     case CTAS: // create table as select
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 8a51e21..0b6ff52 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.DDLTask;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -59,7 +61,6 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContex
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
 import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
@@ -358,8 +359,7 @@ public abstract class TaskCompiler {
       // generate a DDL task and make it a dependent task of the leaf
       CreateTableDesc crtTblDesc = pCtx.getCreateTable();
       crtTblDesc.validate(conf);
-      Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(
-          inputs, outputs, crtTblDesc));
+      Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork2(inputs, outputs, crtTblDesc));
       patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames()));
     } else if (pCtx.getQueryProperties().isMaterializedView()) {
       // generate a DDL task and make it a dependent task of the leaf
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
index b95a35a..5e88b6e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 
 import java.io.Serializable;
@@ -43,8 +43,8 @@ public class DropPartitionHandler extends AbstractMessageHandler {
       Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
           ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions());
       if (partSpecs.size() > 0) {
-        DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." + actualTblName,
-            partSpecs, null, true, context.eventOnlyReplicationSpec());
+        DropPartitionDesc dropPtnDesc = new DropPartitionDesc(actualDbName + "." + actualTblName, partSpecs, true,
+            context.eventOnlyReplicationSpec());
         Task<DDLWork> dropPtnTask = TaskFactory.get(
             new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf
         );
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
index 62784e9..edef74e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hive.ql.parse.repl.load.message;
 
 import org.apache.hadoop.hive.metastore.messaging.DropTableMessage;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 
 import java.io.Serializable;
 import java.util.Collections;
@@ -39,8 +39,8 @@ public class DropTableHandler extends AbstractMessageHandler {
         actualDbName + "." + actualTblName,
         null, true, true, context.eventOnlyReplicationSpec(), false
     );
-    Task<DDLWork> dropTableTask = TaskFactory.get(
-        new DDLWork(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf
+    Task<DDLWork2> dropTableTask = TaskFactory.get(
+        new DDLWork2(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf
     );
     context.log.debug(
         "Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName()
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
index dec6ed5..05a9f91 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
@@ -19,12 +19,12 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
 
 import java.io.Serializable;
 import java.util.Iterator;
@@ -59,8 +59,8 @@ public class TruncatePartitionHandler extends AbstractMessageHandler {
             actualDbName + "." + actualTblName, partSpec,
             context.eventOnlyReplicationSpec());
     truncateTableDesc.setWriteId(msg.getWriteId());
-    Task<DDLWork> truncatePtnTask = TaskFactory.get(
-        new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf);
+    Task<DDLWork2> truncatePtnTask = TaskFactory.get(
+        new DDLWork2(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf);
     context.log.debug("Added truncate ptn task : {}:{}:{}", truncatePtnTask.getId(),
         truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, partSpec);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
index f037cbb..5ef66fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
@@ -18,12 +18,12 @@
 package org.apache.hadoop.hive.ql.parse.repl.load.message;
 
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
 
 import java.io.Serializable;
 import java.util.List;
@@ -39,8 +39,8 @@ public class TruncateTableHandler extends AbstractMessageHandler {
             actualDbName + "." + actualTblName,
             null, context.eventOnlyReplicationSpec());
     truncateTableDesc.setWriteId(msg.getWriteId());
-    Task<DDLWork> truncateTableTask = TaskFactory.get(
-        new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf);
+    Task<DDLWork2> truncateTableTask = TaskFactory.get(
+        new DDLWork2(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf);
 
     context.log.debug("Added truncate tbl task : {}:{}:{}", truncateTableTask.getId(),
         truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
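
Both truncate handlers now build their tasks the same way. A minimal sketch of the whole-table case, assuming the descriptor and work constructors shown in this hunk; the parameters stand in for the values the handler reads from the replication message:

import java.util.HashSet;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

class ReplTruncateTaskSketch {
  static Task<DDLWork2> truncateTask(String qualifiedTable, long writeId, ReplicationSpec replSpec,
      HashSet<ReadEntity> reads, HashSet<WriteEntity> writes, HiveConf conf) {
    // Whole-table truncate: the partition spec argument is null, as in TruncateTableHandler above.
    TruncateTableDesc desc = new TruncateTableDesc(qualifiedTable, null, replSpec);
    desc.setWriteId(writeId);
    return TaskFactory.get(new DDLWork2(reads, writes, desc), conf);
  }
}
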
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index 6527e52..2b653a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.plan;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
-import org.apache.hadoop.hive.ql.parse.PreInsertTableDesc;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 import java.io.Serializable;
@@ -34,19 +33,12 @@ public class DDLWork implements Serializable {
   private static final long serialVersionUID = 1L;
 
   // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates.
-  private PreInsertTableDesc preInsertTableDesc;
   private InsertCommitHookDesc insertCommitHookDesc;
   private AlterMaterializedViewDesc alterMVDesc;
-  private CreateTableDesc createTblDesc;
-  private CreateTableLikeDesc createTblLikeDesc;
   private CreateViewDesc createVwDesc;
-  private DropTableDesc dropTblDesc;
+  private DropPartitionDesc dropPartitionDesc;
   private AlterTableDesc alterTblDesc;
-  private ShowTablesDesc showTblsDesc;
   private ShowColumnsDesc showColumnsDesc;
-  private ShowTblPropertiesDesc showTblPropertiesDesc;
-  private LockTableDesc lockTblDesc;
-  private UnlockTableDesc unlockTblDesc;
   private ShowFunctionsDesc showFuncsDesc;
   private ShowLocksDesc showLocksDesc;
   private ShowCompactionsDesc showCompactionsDesc;
@@ -54,16 +46,11 @@ public class DDLWork implements Serializable {
   private AbortTxnsDesc abortTxnsDesc;
   private DescFunctionDesc descFunctionDesc;
   private ShowPartitionsDesc showPartsDesc;
-  private ShowCreateDatabaseDesc showCreateDbDesc;
-  private ShowCreateTableDesc showCreateTblDesc;
-  private DescTableDesc descTblDesc;
   private AddPartitionDesc addPartitionDesc;
   private RenamePartitionDesc renamePartitionDesc;
   private AlterTableSimpleDesc alterTblSimpleDesc;
   private MsckDesc msckDesc;
-  private ShowTableStatusDesc showTblStatusDesc;
   private AlterTableAlterPartDesc alterTableAlterPartDesc;
-  private TruncateTableDesc truncateTblDesc;
   private AlterTableExchangePartition alterTableExchangePartition;
   private KillQueryDesc killQueryDesc;
 
@@ -116,12 +103,6 @@ public class DDLWork implements Serializable {
   }
 
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      TruncateTableDesc truncateTblDesc) {
-    this(inputs, outputs);
-    this.truncateTblDesc = truncateTblDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
       ShowConfDesc showConfDesc) {
     this(inputs, outputs);
     this.showConfDesc = showConfDesc;
@@ -148,28 +129,6 @@ public class DDLWork implements Serializable {
   }
 
   /**
-   * @param createTblDesc
-   *          create table descriptor
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      CreateTableDesc createTblDesc) {
-    this(inputs, outputs);
-
-    this.createTblDesc = createTblDesc;
-  }
-
-  /**
-   * @param createTblLikeDesc
-   *          create table like descriptor
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      CreateTableLikeDesc createTblLikeDesc) {
-    this(inputs, outputs);
-
-    this.createTblLikeDesc = createTblLikeDesc;
-  }
-
-  /**
    * @param createVwDesc
    *          create view descriptor
    */
@@ -185,30 +144,10 @@ public class DDLWork implements Serializable {
    *          drop table descriptor
    */
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      DropTableDesc dropTblDesc) {
-    this(inputs, outputs);
-
-    this.dropTblDesc = dropTblDesc;
-  }
-
-  /**
-   * @param descTblDesc
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      DescTableDesc descTblDesc) {
-    this(inputs, outputs);
-
-    this.descTblDesc = descTblDesc;
-  }
-
-  /**
-   * @param showTblsDesc
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      ShowTablesDesc showTblsDesc) {
+      DropPartitionDesc dropPartitionDesc) {
     this(inputs, outputs);
 
-    this.showTblsDesc = showTblsDesc;
+    this.dropPartitionDesc = dropPartitionDesc;
   }
 
   /**
@@ -222,26 +161,6 @@ public class DDLWork implements Serializable {
   }
 
   /**
-   * @param lockTblDesc
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      LockTableDesc lockTblDesc) {
-    this(inputs, outputs);
-
-    this.lockTblDesc = lockTblDesc;
-  }
-
-  /**
-   * @param unlockTblDesc
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      UnlockTableDesc unlockTblDesc) {
-    this(inputs, outputs);
-
-    this.unlockTblDesc = unlockTblDesc;
-  }
-
-  /**
    * @param showFuncsDesc
    */
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
@@ -300,26 +219,6 @@ public class DDLWork implements Serializable {
   }
 
   /**
-   * @param showCreateDbDesc
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      ShowCreateDatabaseDesc showCreateDbDesc) {
-    this(inputs, outputs);
-
-    this.showCreateDbDesc = showCreateDbDesc;
-  }
-
-  /**
-   * @param showCreateTblDesc
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      ShowCreateTableDesc showCreateTblDesc) {
-    this(inputs, outputs);
-
-    this.showCreateTblDesc = showCreateTblDesc;
-  }
-
-  /**
    * @param addPartitionDesc
    *          information about the partitions we want to add.
    */
@@ -360,28 +259,6 @@ public class DDLWork implements Serializable {
     msckDesc = checkDesc;
   }
 
-  /**
-   * @param showTblStatusDesc
-   *          show table status descriptor
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      ShowTableStatusDesc showTblStatusDesc) {
-    this(inputs, outputs);
-
-    this.showTblStatusDesc = showTblStatusDesc;
-  }
-
-  /**
-   * @param showTblPropertiesDesc
-   *          show table properties descriptor
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      ShowTblPropertiesDesc showTblPropertiesDesc) {
-    this(inputs, outputs);
-
-    this.showTblPropertiesDesc = showTblPropertiesDesc;
-  }
-
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
       RoleDDLDesc roleDDLDesc) {
     this(inputs, outputs);
@@ -444,12 +321,6 @@ public class DDLWork implements Serializable {
   }
 
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-          PreInsertTableDesc preInsertTableDesc) {
-    this(inputs, outputs);
-    this.preInsertTableDesc = preInsertTableDesc;
-  }
-
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
                  KillQueryDesc killQueryDesc) {
     this(inputs, outputs);
     this.killQueryDesc = killQueryDesc;
@@ -536,22 +407,6 @@ public class DDLWork implements Serializable {
   /**
    * @return the createTblDesc
    */
-  @Explain(displayName = "Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public CreateTableDesc getCreateTblDesc() {
-    return createTblDesc;
-  }
-
-  /**
-   * @return the createTblDesc
-   */
-  @Explain(displayName = "Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public CreateTableLikeDesc getCreateTblLikeDesc() {
-    return createTblLikeDesc;
-  }
-
-  /**
-   * @return the createTblDesc
-   */
   @Explain(displayName = "Create View Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public CreateViewDesc getCreateViewDesc() {
     return createVwDesc;
@@ -560,9 +415,9 @@ public class DDLWork implements Serializable {
   /**
    * @return the dropTblDesc
    */
-  @Explain(displayName = "Drop Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public DropTableDesc getDropTblDesc() {
-    return dropTblDesc;
+  @Explain(displayName = "Drop Partition Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public DropPartitionDesc getDropPartitionDesc() {
+    return dropPartitionDesc;
   }
 
   /**
@@ -583,14 +438,6 @@ public class DDLWork implements Serializable {
   }
 
   /**
-   * @return the showTblsDesc
-   */
-  @Explain(displayName = "Show Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public ShowTablesDesc getShowTblsDesc() {
-    return showTblsDesc;
-  }
-
-  /**
    * @return the showColumnsDesc
    */
   @Explain(displayName = "Show Columns Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -630,22 +477,6 @@ public class DDLWork implements Serializable {
   }
 
   /**
-   * @return the lockTblDesc
-   */
-  @Explain(displayName = "Lock Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public LockTableDesc getLockTblDesc() {
-    return lockTblDesc;
-  }
-
-  /**
-   * @return the unlockTblDesc
-   */
-  @Explain(displayName = "Unlock Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public UnlockTableDesc getUnlockTblDesc() {
-    return unlockTblDesc;
-  }
-
-  /**
    * @return the descFuncDesc
    */
   @Explain(displayName = "Show Function Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -666,28 +497,6 @@ public class DDLWork implements Serializable {
     return showPartsDesc;
   }
 
-  @Explain(displayName = "Show Create Database Operator",
-      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public ShowCreateDatabaseDesc getShowCreateDbDesc() {
-    return showCreateDbDesc;
-  }
-
-  /**
-   * @return the showCreateTblDesc
-   */
-  @Explain(displayName = "Show Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public ShowCreateTableDesc getShowCreateTblDesc() {
-    return showCreateTblDesc;
-  }
-
-  /**
-   * @return the descTblDesc
-   */
-  @Explain(displayName = "Describe Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public DescTableDesc getDescTblDesc() {
-    return descTblDesc;
-  }
-
   /**
    * @return information about the partitions we want to add.
    */
@@ -717,17 +526,6 @@ public class DDLWork implements Serializable {
     return msckDesc;
   }
 
-  /**
-   * @return show table descriptor
-   */
-  public ShowTableStatusDesc getShowTblStatusDesc() {
-    return showTblStatusDesc;
-  }
-
-  public ShowTblPropertiesDesc getShowTblPropertiesDesc() {
-    return showTblPropertiesDesc;
-  }
-
   public HashSet<ReadEntity> getInputs() {
     return inputs;
   }
@@ -787,11 +585,6 @@ public class DDLWork implements Serializable {
     return alterTableAlterPartDesc;
   }
 
-  @Explain(displayName = "Truncate Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public TruncateTableDesc getTruncateTblDesc() {
-    return truncateTblDesc;
-  }
-
   /**
    * @return information about the table partition to be exchanged
    */
@@ -815,11 +608,6 @@ public class DDLWork implements Serializable {
     return insertCommitHookDesc;
   }
 
-  @Explain(displayName = "Pre Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public PreInsertTableDesc getPreInsertTableDesc() {
-    return preInsertTableDesc;
-  }
-
   @Explain(displayName = "Create resource plan")
   public CreateResourcePlanDesc getCreateResourcePlanDesc() {
     return createResourcePlanDesc;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java
new file mode 100644
index 0000000..81fcc46
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DropPartitionDesc.
+ */
+@Explain(displayName = "Drop Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class DropPartitionDesc extends DDLDesc implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * PartSpec.
+   */
+  public static class PartSpec implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private ExprNodeGenericFuncDesc partSpec;
+    // TODO: see if we can get rid of this... used in one place to distinguish archived parts
+    private int prefixLength;
+
+    public PartSpec(ExprNodeGenericFuncDesc partSpec, int prefixLength) {
+      this.partSpec = partSpec;
+      this.prefixLength = prefixLength;
+    }
+
+    public ExprNodeGenericFuncDesc getPartSpec() {
+      return partSpec;
+    }
+
+    public int getPrefixLength() {
+      return prefixLength;
+    }
+  }
+
+  private final String tableName;
+  private final ArrayList<PartSpec> partSpecs;
+  private final boolean ifPurge;
+  private final ReplicationSpec replicationSpec;
+
+  public DropPartitionDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs, boolean ifPurge,
+      ReplicationSpec replicationSpec) {
+    this.tableName = tableName;
+    this.partSpecs = new ArrayList<PartSpec>(partSpecs.size());
+    for (Map.Entry<Integer, List<ExprNodeGenericFuncDesc>> partSpec : partSpecs.entrySet()) {
+      int prefixLength = partSpec.getKey();
+      for (ExprNodeGenericFuncDesc expr : partSpec.getValue()) {
+        this.partSpecs.add(new PartSpec(expr, prefixLength));
+      }
+    }
+    this.ifPurge = ifPurge;
+    this.replicationSpec = replicationSpec == null ? new ReplicationSpec() : replicationSpec;
+  }
+
+  @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getTableName() {
+    return tableName;
+  }
+
+  public ArrayList<PartSpec> getPartSpecs() {
+    return partSpecs;
+  }
+
+  public boolean getIfPurge() {
+    return ifPurge;
+  }
+
+  /**
+   * @return what kind of replication scope this drop is running under.
+   * This can result in a "DROP IF OLDER THAN" kind of semantics.
+   */
+  public ReplicationSpec getReplicationSpec() {
+    return replicationSpec;
+  }
+}
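
For illustration, constructing the new descriptor from the map shape its constructor expects; the table name, prefix length and expression below are placeholders (in practice ReplUtils.genPartSpecs produces the map, as in DropPartitionHandler above):

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

class DropPartitionDescSketch {
  // partitionExpr is a partition-pruning expression built elsewhere.
  static DropPartitionDesc forSinglePrefix(ExprNodeGenericFuncDesc partitionExpr) {
    Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = new HashMap<>();
    partSpecs.put(1, Collections.singletonList(partitionExpr));   // key = prefix length
    // The constructor flattens the map into one PartSpec per expression.
    return new DropPartitionDesc("sales.web_logs", partSpecs, true, new ReplicationSpec());
  }
}
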
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
deleted file mode 100644
index 5d22154..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-/**
- * DropTableDesc.
- * TODO: this is currently used for both drop table and drop partitions.
- */
-@Explain(displayName = "Drop Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class DropTableDesc extends DDLDesc implements Serializable {
-  private static final long serialVersionUID = 1L;
-
-  public static class PartSpec {
-    public PartSpec(ExprNodeGenericFuncDesc partSpec, int prefixLength) {
-      this.partSpec = partSpec;
-      this.prefixLength = prefixLength;
-    }
-    public ExprNodeGenericFuncDesc getPartSpec() {
-      return partSpec;
-    }
-    public int getPrefixLength() {
-      return prefixLength;
-    }
-    private static final long serialVersionUID = 1L;
-    private ExprNodeGenericFuncDesc partSpec;
-    // TODO: see if we can get rid of this... used in one place to distinguish archived parts
-    private int prefixLength;
-  }
-
-  String tableName;
-  ArrayList<PartSpec> partSpecs;
-  TableType expectedType;
-  boolean ifExists;
-  boolean ifPurge;
-  ReplicationSpec replicationSpec;
-  boolean validationRequired;
-  
-
-  public DropTableDesc() {
-  }
-
-  /**
-   * @param tableName
-   * @param ifPurge
-   */
-  public DropTableDesc(
-      String tableName, TableType expectedType, boolean ifExists,
-      boolean ifPurge, ReplicationSpec replicationSpec) {
-	  this(tableName, expectedType, ifExists, ifPurge, replicationSpec, true);
-  }
-
-  public DropTableDesc(
-      String tableName, TableType expectedType, boolean ifExists,
-      boolean ifPurge, ReplicationSpec replicationSpec, boolean validationRequired) {
-    this.tableName = tableName;
-    this.partSpecs = null;
-    this.expectedType = expectedType;
-    this.ifExists = ifExists;
-    this.ifPurge = ifPurge;
-    this.replicationSpec = replicationSpec;
-    this.validationRequired = validationRequired;
-  }
-
-  public DropTableDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs,
-      TableType expectedType, boolean ifPurge, ReplicationSpec replicationSpec) {
-    this(tableName, partSpecs, expectedType, ifPurge, replicationSpec, true);
-  }
-
-  public DropTableDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs,
-      TableType expectedType, boolean ifPurge, ReplicationSpec replicationSpec,  boolean validationRequired) {
-    this.tableName = tableName;
-    this.partSpecs = new ArrayList<PartSpec>(partSpecs.size());
-    for (Map.Entry<Integer, List<ExprNodeGenericFuncDesc>> partSpec : partSpecs.entrySet()) {
-      int prefixLength = partSpec.getKey();
-      for (ExprNodeGenericFuncDesc expr : partSpec.getValue()) {
-        this.partSpecs.add(new PartSpec(expr, prefixLength));
-      }
-    }
-    this.expectedType = expectedType;
-    this.ifPurge = ifPurge;
-    this.replicationSpec = replicationSpec;
-    this.validationRequired = validationRequired;
-  }
-
-  /**
-   * @return the tableName
-   */
-  @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getTableName() {
-    return tableName;
-  }
-
-  /**
-   * @param tableName
-   *          the tableName to set
-   */
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  /**
-   * @return the partSpecs
-   */
-  public ArrayList<PartSpec> getPartSpecs() {
-    return partSpecs;
-  }
-
-  /**
-   * @return whether to expect a view being dropped
-   */
-  public boolean getExpectView() {
-    return expectedType != null && expectedType == TableType.VIRTUAL_VIEW;
-  }
-
-  /**
-   * @return whether to expect a materialized view being dropped
-   */
-  public boolean getExpectMaterializedView() {
-    return expectedType != null && expectedType == TableType.MATERIALIZED_VIEW;
-  }
-
-  /**
-   * @return whether IF EXISTS was specified
-   */
-  public boolean getIfExists() {
-    return ifExists;
-  }
-
-  /**
-   * @param ifExists
-   *          set whether IF EXISTS was specified
-   */
-  public void setIfExists(boolean ifExists) {
-    this.ifExists = ifExists;
-  }
-
-  /**
-   *  @return whether Purge was specified
-   */
-  public boolean getIfPurge() {
-      return ifPurge;
-  }
-
-  /**
-   * @param ifPurge
-   *          set whether Purge was specified
-   */
-  public void setIfPurge(boolean ifPurge) {
-      this.ifPurge = ifPurge;
-  }
-
-  /**
-   * @return what kind of replication scope this drop is running under.
-   * This can result in a "DROP IF OLDER THAN" kind of semantic
-   */
-  public ReplicationSpec getReplicationSpec(){
-    if (replicationSpec == null){
-      this.replicationSpec = new ReplicationSpec();
-    }
-    return this.replicationSpec;
-  }
-
-  /**
-   * @return whether the table type validation is needed (false in repl case)
-   */
-  public boolean getValidationRequired(){
-    return this.validationRequired;
-  }
-}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
index 017e1c7..381c3b5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -323,9 +325,9 @@ public class ImportTableDesc {
       HiveConf conf) {
     switch (getDescType()) {
     case TABLE:
-        return TaskFactory.get(new DDLWork(inputs, outputs, createTblDesc), conf);
+      return TaskFactory.get(new DDLWork2(inputs, outputs, createTblDesc), conf);
     case VIEW:
-        return TaskFactory.get(new DDLWork(inputs, outputs, createViewDesc), conf);
+      return TaskFactory.get(new DDLWork(inputs, outputs, createViewDesc), conf);
     }
     return null;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
index 46761ff..3abdc48 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.plan;
 import java.io.Serializable;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 33a5371..b668e40 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 198f7fd..1019285 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -60,9 +60,8 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
-import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.ql.DriverUtils;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
+import org.apache.hadoop.hive.ql.ddl.table.ShowCreateTableOperation;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
@@ -652,7 +651,7 @@ public class CompactorMR {
     String sh = t.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE);
     assert sh == null; // Not supposed to be a compactable table.
     if (!serdeParams.isEmpty()) {
-      DDLTask.appendSerdeParams(query, serdeParams);
+      ShowCreateTableOperation.appendSerdeParams(query, serdeParams);
     }
     query.append("STORED AS INPUTFORMAT '").append(
         HiveStringUtils.escapeHiveCommand(sd.getInputFormat())).append("' OUTPUTFORMAT '").append(
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java
index 1ad0225..ae22b7f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java
@@ -22,9 +22,9 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.QueryPlan;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.Assert;
 import org.junit.Test;
@@ -149,9 +149,9 @@ public class TestHiveDecimalParse {
     }
 
     QueryPlan plan = driver.getPlan();
-    DDLTask task = (DDLTask) plan.getRootTasks().get(0);
-    DDLWork work = task.getWork();
-    CreateTableDesc spec = work.getCreateTblDesc();
+    DDLTask2 task = (DDLTask2) plan.getRootTasks().get(0);
+    DDLWork2 work = task.getWork();
+    CreateTableDesc spec = (CreateTableDesc)work.getDDLDesc();
     FieldSchema fs = spec.getCols().get(0);
     return fs.getType();
   }
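
The TestHiveDecimalParse hunk above shows the plan-inspection pattern introduced by this refactor: the root task is now a DDLTask2, and its DDLWork2 exposes one generic descriptor through getDDLDesc(), which callers cast to the concrete type they expect. A minimal sketch of that pattern, assuming a Driver that has already compiled a CREATE TABLE statement (the helper name is illustrative, not part of the patch):

    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.QueryPlan;
    import org.apache.hadoop.hive.ql.ddl.DDLTask2;
    import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;

    // Sketch only; mirrors the updated test above.
    public static String firstColumnType(Driver driver) {
      QueryPlan plan = driver.getPlan();
      DDLTask2 task = (DDLTask2) plan.getRootTasks().get(0);                // DDL work is rooted in DDLTask2 now
      CreateTableDesc desc = (CreateTableDesc) task.getWork().getDDLDesc(); // generic DDLDesc, cast to the concrete type
      return desc.getCols().get(0).getType();                               // declared type of the first column
    }
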
diff --git a/ql/src/test/queries/clientpositive/db_ddl_explain.q b/ql/src/test/queries/clientpositive/db_ddl_explain.q
new file mode 100644
index 0000000..7ad0bdd
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/db_ddl_explain.q
@@ -0,0 +1,20 @@
+EXPLAIN CREATE DATABASE d;
+CREATE DATABASE d;
+
+EXPLAIN SHOW DATABASES;
+SHOW DATABASES;
+
+EXPLAIN DESCRIBE DATABASE d;
+DESCRIBE DATABASE d;
+
+EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis');
+ALTER DATABASE d SET dbproperties('test'='yesthisis');
+
+EXPLAIN SHOW CREATE DATABASE d;
+SHOW CREATE DATABASE d;
+
+EXPLAIN USE d;
+USE d;
+
+EXPLAIN DROP DATABASE d;
+DROP DATABASE d;
diff --git a/ql/src/test/results/clientnegative/authorization_explain.q.out b/ql/src/test/results/clientnegative/authorization_explain.q.out
index 792de42..29542a1 100644
--- a/ql/src/test/results/clientnegative/authorization_explain.q.out
+++ b/ql/src/test/results/clientnegative/authorization_explain.q.out
@@ -11,14 +11,13 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: key int, value string
-          if not exists: true
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.authorization_explain
+    Create Table
+      columns: key int, value string
+      if not exists: true
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.authorization_explain
 
 PREHOOK: query: create table if not exists authorization_explain (key int, value string)
 PREHOOK: type: CREATETABLE
diff --git a/ql/src/test/results/clientnegative/avro_decimal.q.out b/ql/src/test/results/clientnegative/avro_decimal.q.out
index 9d00d6e..077b2d8 100644
--- a/ql/src/test/results/clientnegative/avro_decimal.q.out
+++ b/ql/src/test/results/clientnegative/avro_decimal.q.out
@@ -19,4 +19,4 @@ TBLPROPERTIES (
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@avro_dec
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type)
diff --git a/ql/src/test/results/clientnegative/constraint_duplicate_name.q.out b/ql/src/test/results/clientnegative/constraint_duplicate_name.q.out
index 8a154f6..b94ca1c 100644
--- a/ql/src/test/results/clientnegative/constraint_duplicate_name.q.out
+++ b/ql/src/test/results/clientnegative/constraint_duplicate_name.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: create table t1(j int constraint c1 default 4)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:Constraint name already exists: c1)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidObjectException(message:Constraint name already exists: c1)
diff --git a/ql/src/test/results/clientnegative/create_external_acid.q.out b/ql/src/test/results/clientnegative/create_external_acid.q.out
index 123fe5a..85b0458 100644
--- a/ql/src/test/results/clientnegative/create_external_acid.q.out
+++ b/ql/src/test/results/clientnegative/create_external_acid.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: create external table acid_external (a int, b varchar(128)) clus
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_external
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:default.acid_external cannot be declared transactional because it's an external table)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:default.acid_external cannot be declared transactional because it's an external table)
diff --git a/ql/src/test/results/clientnegative/create_not_acid.q.out b/ql/src/test/results/clientnegative/create_not_acid.q.out
index e5aad61..3172f18 100644
--- a/ql/src/test/results/clientnegative/create_not_acid.q.out
+++ b/ql/src/test/results/clientnegative/create_not_acid.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) TBLPROPERTI
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_notbucketed
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:The table must be stored using an ACID compliant format (such as ORC): default.acid_notbucketed)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:The table must be stored using an ACID compliant format (such as ORC): default.acid_notbucketed)
diff --git a/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out b/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out
index 931f2a7..694dc73 100644
--- a/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out
+++ b/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out
@@ -8,6 +8,6 @@ PREHOOK: query: create table aa ( test STRING )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@aa
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.util.regex.PatternSyntaxException: Unclosed character class near index 7
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.util.regex.PatternSyntaxException: Unclosed character class near index 7
 [^\](.*)
        ^
diff --git a/ql/src/test/results/clientnegative/create_view_failure2.q.out b/ql/src/test/results/clientnegative/create_view_failure2.q.out
index ad5d5fe..c62dc32 100644
--- a/ql/src/test/results/clientnegative/create_view_failure2.q.out
+++ b/ql/src/test/results/clientnegative/create_view_failure2.q.out
@@ -17,4 +17,4 @@ PREHOOK: query: CREATE TABLE xxx4(key int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@xxx4
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Table hive.default.xxx4 already exists)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. AlreadyExistsException(message:Table hive.default.xxx4 already exists)
diff --git a/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out b/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out
index b3d1d9f..01baf1b 100644
--- a/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out
+++ b/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: create table t2(x int, constraint pk1 primary key (x) disable)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:Constraint name already exists: pk1)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidObjectException(message:Constraint name already exists: pk1)
diff --git a/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out b/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out
index 6598d6c..c54c724 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:Child column not found: x)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. InvalidObjectException(message:Child column not found: x)
diff --git a/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out b/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out
index fae2769..4ec45a6 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING PRIMARY KEY DISABLE, b STRING, CON
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;)
diff --git a/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out b/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out
index 1644d5a..04365f0 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING UNIQUE DISABLE, b STRING, CONSTRAI
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;)
diff --git a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out
index ce0f947..ca1304e 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Foreign key references a:string; but no corresponding primary key or unique key exists. Possible keys: [a:int;])
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Foreign key references a:string; but no corresponding primary key or unique key exists. Possible keys: [a:int;])
diff --git a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out
index 998c643..32d6284 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Foreign key references a:string;b:string; but no corresponding primary key or unique key exists. Possible keys: [b:int;a:string;, a:string;b:int;])
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Foreign key references a:string;b:string; but no corresponding primary key or unique key exists. Possible keys: [b:int;a:string;, a:string;b:int;])
diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out
index 3fad08c..d2bcea0 100644
--- a/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out
+++ b/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out
@@ -12,4 +12,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@drop_notablelock
 PREHOOK: query: lock table drop_notablelock shared
 PREHOOK: type: LOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out
index 2d9a20f..8e7dc6a 100644
--- a/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out
+++ b/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out
@@ -12,4 +12,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@drop_notableunlock
 PREHOOK: query: unlock table drop_notableunlock
 PREHOOK: type: UNLOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
diff --git a/ql/src/test/results/clientnegative/deletejar.q.out b/ql/src/test/results/clientnegative/deletejar.q.out
index d52186b..ff77603 100644
--- a/ql/src/test/results/clientnegative/deletejar.q.out
+++ b/ql/src/test/results/clientnegative/deletejar.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE DELETEJAR(KEY STRING, VALUE STRING) ROW FORMAT SERD
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@DELETEJAR
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot validate serde: org.apache.hadoop.hive.serde2.TestSerDe
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot validate serde: org.apache.hadoop.hive.serde2.TestSerDe
diff --git a/ql/src/test/results/clientnegative/describe_xpath1.q.out b/ql/src/test/results/clientnegative/describe_xpath1.q.out
index 322e6e8..ca8e5d0 100644
--- a/ql/src/test/results/clientnegative/describe_xpath1.q.out
+++ b/ql/src/test/results/clientnegative/describe_xpath1.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift $elem$
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test [...]
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test [...]
diff --git a/ql/src/test/results/clientnegative/describe_xpath2.q.out b/ql/src/test/results/clientnegative/describe_xpath2.q.out
index c1f2ec1..f1099c9 100644
--- a/ql/src/test/results/clientnegative/describe_xpath2.q.out
+++ b/ql/src/test/results/clientnegative/describe_xpath2.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift $key$
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test. [...]
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test. [...]
diff --git a/ql/src/test/results/clientnegative/describe_xpath3.q.out b/ql/src/test/results/clientnegative/describe_xpath3.q.out
index a300633..d29d093 100644
--- a/ql/src/test/results/clientnegative/describe_xpath3.q.out
+++ b/ql/src/test/results/clientnegative/describe_xpath3.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift lint.abc
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Error in getting fields from serde.Unknown type for abc
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Error in getting fields from serde.Unknown type for abc
diff --git a/ql/src/test/results/clientnegative/describe_xpath4.q.out b/ql/src/test/results/clientnegative/describe_xpath4.q.out
index b569eca..ec81c9c 100644
--- a/ql/src/test/results/clientnegative/describe_xpath4.q.out
+++ b/ql/src/test/results/clientnegative/describe_xpath4.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift mStringString.abc
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Error in getting fields from serde.Unknown type for abc
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Error in getting fields from serde.Unknown type for abc
diff --git a/ql/src/test/results/clientnegative/drop_table_failure2.q.out b/ql/src/test/results/clientnegative/drop_table_failure2.q.out
index f0097cd..c3d94a7 100644
--- a/ql/src/test/results/clientnegative/drop_table_failure2.q.out
+++ b/ql/src/test/results/clientnegative/drop_table_failure2.q.out
@@ -13,4 +13,4 @@ PREHOOK: query: DROP TABLE xxx6
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@xxx6
 PREHOOK: Output: default@xxx6
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop a view with DROP TABLE
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop a view with DROP TABLE
diff --git a/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out b/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out
index 88e3b7d..efc080e 100644
--- a/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out
+++ b/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out
@@ -32,4 +32,4 @@ PREHOOK: query: drop table mytable
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@mytable
 PREHOOK: Output: default@mytable
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop table since it is used by at least one materialized view definition. Please drop any materialized view that uses the table before dropping it
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop table since it is used by at least one materialized view definition. Please drop any materialized view that uses the table before dropping it
diff --git a/ql/src/test/results/clientnegative/drop_view_failure1.q.out b/ql/src/test/results/clientnegative/drop_view_failure1.q.out
index a1a4498..87d91ed 100644
--- a/ql/src/test/results/clientnegative/drop_view_failure1.q.out
+++ b/ql/src/test/results/clientnegative/drop_view_failure1.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: DROP VIEW xxx1
 PREHOOK: type: DROPVIEW
 PREHOOK: Input: default@xxx1
 PREHOOK: Output: default@xxx1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop a base table with DROP VIEW
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop a base table with DROP VIEW
diff --git a/ql/src/test/results/clientnegative/druid_address.q.out b/ql/src/test/results/clientnegative/druid_address.q.out
index 66b7e14..c26eff3 100644
--- a/ql/src/test/results/clientnegative/druid_address.q.out
+++ b/ql/src/test/results/clientnegative/druid_address.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration)
diff --git a/ql/src/test/results/clientnegative/druid_buckets.q.out b/ql/src/test/results/clientnegative/druid_buckets.q.out
index 94e4f70..ad381f2 100644
--- a/ql/src/test/results/clientnegative/druid_buckets.q.out
+++ b/ql/src/test/results/clientnegative/druid_buckets.q.out
@@ -5,4 +5,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:CLUSTERED BY may not be specified for Druid)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:CLUSTERED BY may not be specified for Druid)
diff --git a/ql/src/test/results/clientnegative/druid_case.q.out b/ql/src/test/results/clientnegative/druid_case.q.out
index 457028b..b18f44f 100644
--- a/ql/src/test/results/clientnegative/druid_case.q.out
+++ b/ql/src/test/results/clientnegative/druid_case.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name anonymous in the table definition.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name anonymous in the table definition.
diff --git a/ql/src/test/results/clientnegative/druid_datasource.q.out b/ql/src/test/results/clientnegative/druid_datasource.q.out
index 177ffaa..de170fd 100644
--- a/ql/src/test/results/clientnegative/druid_datasource.q.out
+++ b/ql/src/test/results/clientnegative/druid_datasource.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("property" = "localhost")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties)
diff --git a/ql/src/test/results/clientnegative/druid_datasource2.q.out b/ql/src/test/results/clientnegative/druid_datasource2.q.out
index 2f783fe..71371c3 100644
--- a/ql/src/test/results/clientnegative/druid_datasource2.q.out
+++ b/ql/src/test/results/clientnegative/druid_datasource2.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("property" = "localhost", "druid.datasource" = "mydatasource")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.druid_table_1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.druid_table_1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
diff --git a/ql/src/test/results/clientnegative/druid_location.q.out b/ql/src/test/results/clientnegative/druid_location.q.out
index 5727e8c..7ee8262 100644
--- a/ql/src/test/results/clientnegative/druid_location.q.out
+++ b/ql/src/test/results/clientnegative/druid_location.q.out
@@ -6,4 +6,4 @@ PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:LOCATION may not be specified for Druid)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:LOCATION may not be specified for Druid)
diff --git a/ql/src/test/results/clientnegative/druid_partitions.q.out b/ql/src/test/results/clientnegative/druid_partitions.q.out
index 6fb55c1..81325a8 100644
--- a/ql/src/test/results/clientnegative/druid_partitions.q.out
+++ b/ql/src/test/results/clientnegative/druid_partitions.q.out
@@ -5,4 +5,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:PARTITIONED BY may not be specified for Druid)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:PARTITIONED BY may not be specified for Druid)
diff --git a/ql/src/test/results/clientnegative/external1.q.out b/ql/src/test/results/clientnegative/external1.q.out
index f2bc9c6..8d47ccd 100644
--- a/ql/src/test/results/clientnegative/external1.q.out
+++ b/ql/src/test/results/clientnegative/external1.q.out
@@ -3,4 +3,4 @@ PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
 PREHOOK: Output: default@external1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
diff --git a/ql/src/test/results/clientnegative/insert_sorted.q.out b/ql/src/test/results/clientnegative/insert_sorted.q.out
index bb3c7e3..fef40ee 100644
--- a/ql/src/test/results/clientnegative/insert_sorted.q.out
+++ b/ql/src/test/results/clientnegative/insert_sorted.q.out
@@ -20,4 +20,4 @@ PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_insertsort
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.)
diff --git a/ql/src/test/results/clientnegative/lockneg1.q.out b/ql/src/test/results/clientnegative/lockneg1.q.out
index 3a96cda..cbcefa0 100644
--- a/ql/src/test/results/clientnegative/lockneg1.q.out
+++ b/ql/src/test/results/clientnegative/lockneg1.q.out
@@ -31,4 +31,4 @@ POSTHOOK: type: LOCKTABLE
 PREHOOK: query: LOCK TABLE tstsrc EXCLUSIVE
 PREHOOK: type: LOCKTABLE
 Unable to acquire EXPLICIT, EXCLUSIVE lock default@tstsrc after 1 attempts.
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2
diff --git a/ql/src/test/results/clientnegative/lockneg2.q.out b/ql/src/test/results/clientnegative/lockneg2.q.out
index 31e9087..3e988b9 100644
--- a/ql/src/test/results/clientnegative/lockneg2.q.out
+++ b/ql/src/test/results/clientnegative/lockneg2.q.out
@@ -22,4 +22,4 @@ POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string
 POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: UNLOCK TABLE tstsrc
 PREHOOK: type: UNLOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Table tstsrc is not locked 
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Table tstsrc is not locked 
diff --git a/ql/src/test/results/clientnegative/lockneg3.q.out b/ql/src/test/results/clientnegative/lockneg3.q.out
index e4f6357..1403c73 100644
--- a/ql/src/test/results/clientnegative/lockneg3.q.out
+++ b/ql/src/test/results/clientnegative/lockneg3.q.out
@@ -26,4 +26,4 @@ POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpar
 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: UNLOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11')
 PREHOOK: type: UNLOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Table tstsrcpart is not locked 
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Table tstsrcpart is not locked 
diff --git a/ql/src/test/results/clientnegative/materialized_view_drop.q.out b/ql/src/test/results/clientnegative/materialized_view_drop.q.out
index da95afb..f059e6d 100644
--- a/ql/src/test/results/clientnegative/materialized_view_drop.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_drop.q.out
@@ -39,4 +39,4 @@ PREHOOK: query: drop materialized view cmv_basetable
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@cmv_basetable
 PREHOOK: Output: default@cmv_basetable
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop a base table with DROP MATERIALIZED VIEW
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop a base table with DROP MATERIALIZED VIEW
diff --git a/ql/src/test/results/clientnegative/materialized_view_drop2.q.out b/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
index d4f243c..e3c7053 100644
--- a/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
@@ -31,4 +31,4 @@ PREHOOK: query: drop view cmv_mat_view
 PREHOOK: type: DROPVIEW
 PREHOOK: Input: default@cmv_mat_view
 PREHOOK: Output: default@cmv_mat_view
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot drop a materialized view with DROP VIEW
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Cannot drop a materialized view with DROP VIEW
diff --git a/ql/src/test/results/clientnegative/nested_complex_neg.q.out b/ql/src/test/results/clientnegative/nested_complex_neg.q.out
index a6f9ac5..200eee1 100644
--- a/ql/src/test/results/clientnegative/nested_complex_neg.q.out
+++ b/ql/src/test/results/clientnegative/nested_complex_neg.q.out
@@ -7,4 +7,4 @@ simple_string string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@nestedcomplex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Number of levels of nesting supported for LazySimpleSerde is 7 Unable to work with level 23. Use hive.serialization.extend.nesting.levels serde property for tables using LazySimpleSerde.)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Number of levels of nesting supported for LazySimpleSerde is 7 Unable to work with level 23. Use hive.serialization.extend.nesting.levels serde property for tables using LazySimpleSerde.)
diff --git a/ql/src/test/results/clientnegative/serde_regex.q.out b/ql/src/test/results/clientnegative/serde_regex.q.out
index 1047a82..faa5d7b 100644
--- a/ql/src/test/results/clientnegative/serde_regex.q.out
+++ b/ql/src/test/results/clientnegative/serde_regex.q.out
@@ -22,4 +22,4 @@ STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@serde_regex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.serde2.RegexSerDe doesn't allow column [9] named strct with type struct<a:int,b:string>)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.serde2.RegexSerDe doesn't allow column [9] named strct with type struct<a:int,b:string>)
diff --git a/ql/src/test/results/clientnegative/serde_regex3.q.out b/ql/src/test/results/clientnegative/serde_regex3.q.out
index 33d647b..2214638 100644
--- a/ql/src/test/results/clientnegative/serde_regex3.q.out
+++ b/ql/src/test/results/clientnegative/serde_regex3.q.out
@@ -19,4 +19,4 @@ STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@serde_regex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException This table does not have serde property "input.regex"!)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException This table does not have serde property "input.regex"!)
diff --git a/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out b/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out
index d7b9965..bacbda1 100644
--- a/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out
+++ b/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: create table `c/b/o_t1`(key string, value string, c_int int, c_f
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@c/b/o_t1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: [c/b/o_t1]: is not a valid table name
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. org.apache.hadoop.hive.ql.metadata.HiveException: [c/b/o_t1]: is not a valid table name
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables1.q.out b/ql/src/test/results/clientnegative/strict_managed_tables1.q.out
index a659644..04b8fb4 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables1.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables1.q.out
@@ -26,4 +26,4 @@ PREHOOK: query: create table strict_managed_tables1_tab4 (c1 string, c2 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@strict_managed_tables1_tab4
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.strict_managed_tables1_tab4 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.strict_managed_tables1_tab4 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables4.q.out b/ql/src/test/results/clientnegative/strict_managed_tables4.q.out
index 0c7576f..0bff565 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables4.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables4.q.out
@@ -28,4 +28,4 @@ STORED AS AVRO
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@strict_managed_tables6_tab2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.strict_managed_tables6_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.strict_managed_tables6_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables5.q.out b/ql/src/test/results/clientnegative/strict_managed_tables5.q.out
index 0e29fbd..4d9b5d0 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables5.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables5.q.out
@@ -16,4 +16,4 @@ STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@strict_managed_tables5_tab2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.strict_managed_tables5_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Table default.strict_managed_tables5_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
diff --git a/ql/src/test/results/clientpositive/ambiguitycheck.q.out b/ql/src/test/results/clientpositive/ambiguitycheck.q.out
index aff5752..efbd0d8 100644
--- a/ql/src/test/results/clientpositive/ambiguitycheck.q.out
+++ b/ql/src/test/results/clientpositive/ambiguitycheck.q.out
@@ -828,10 +828,9 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Describe Table Operator:
-        Describe Table
+    Describe Table
 #### A masked pattern was here ####
-          table: default.src
+      table: default.src
 
   Stage: Stage-1
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
index d7f7b22..1e68e78 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
@@ -367,13 +367,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-7
-      Create Table Operator:
-        Create Table
-          columns: _c0 int
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.tmp_n0
+    Create Table
+      columns: _c0 int
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.tmp_n0
 
   Stage: Stage-2
     Map Reduce
diff --git a/ql/src/test/results/clientpositive/create_union_table.q.out b/ql/src/test/results/clientpositive/create_union_table.q.out
index f773f34..17b5fc0 100644
--- a/ql/src/test/results/clientpositive/create_union_table.q.out
+++ b/ql/src/test/results/clientpositive/create_union_table.q.out
@@ -13,13 +13,12 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: mydata uniontype<int,double,array<string>,struct<a:int,b:string>>, strct struct<a:int,b:string,c:string>
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.abc
+    Create Table
+      columns: mydata uniontype<int,double,array<string>,struct<a:int,b:string>>, strct struct<a:int,b:string,c:string>
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.abc
 
 PREHOOK: query: create table abc(mydata uniontype<int,double,array<string>,struct<a:int,b:string>>,
 strct struct<a:int, b:string, c:string>)
diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
index 7c7378a..c4168b1 100644
--- a/ql/src/test/results/clientpositive/ctas.q.out
+++ b/ql/src/test/results/clientpositive/ctas.q.out
@@ -113,13 +113,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-5
-      Create Table Operator:
-        Create Table
-          columns: k string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.nzhang_CTAS1
+    Create Table
+      columns: k string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.nzhang_CTAS1
 
   Stage: Stage-3
     Stats Work
@@ -316,13 +315,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-5
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.nzhang_ctas2
+    Create Table
+      columns: key string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.nzhang_ctas2
 
   Stage: Stage-3
     Stats Work
@@ -519,13 +517,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-5
-      Create Table Operator:
-        Create Table
-          columns: half_key double, conb string
-          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          name: default.nzhang_ctas3
+    Create Table
+      columns: half_key double, conb string
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+      name: default.nzhang_ctas3
 
   Stage: Stage-3
     Stats Work
@@ -786,14 +783,13 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-5
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          field delimiter: ,
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.nzhang_ctas4
+    Create Table
+      columns: key string, value string
+      field delimiter: ,
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.nzhang_ctas4
 
   Stage: Stage-3
     Stats Work
@@ -991,16 +987,15 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-5
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          field delimiter: ,
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          line delimiter: 
+    Create Table
+      columns: key string, value string
+      field delimiter: ,
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      line delimiter: 
 
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.nzhang_ctas5
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.nzhang_ctas5
 
   Stage: Stage-3
     Stats Work
diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
index 9db4ddd..b875615 100644
--- a/ql/src/test/results/clientpositive/ctas_colname.q.out
+++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
@@ -76,13 +76,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, value string, _c1 double, _c2 string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.summary
+    Create Table
+      columns: key string, value string, _c1 double, _c2 string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.summary
 
   Stage: Stage-2
     Stats Work
@@ -287,13 +286,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, value string, rr int
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.x4
+    Create Table
+      columns: key string, value string, rr int
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.x4
 
   Stage: Stage-2
     Stats Work
@@ -530,13 +528,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-5
-      Create Table Operator:
-        Create Table
-          columns: key string, value string, lead1 string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.x5
+    Create Table
+      columns: key string, value string, lead1 string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.x5
 
   Stage: Stage-3
     Stats Work
@@ -731,13 +728,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-8
-      Create Table Operator:
-        Create Table
-          columns: key string, value string, _c1 double
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.x6
+    Create Table
+      columns: key string, value string, _c1 double
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.x6
 
   Stage: Stage-2
     Stats Work
@@ -940,13 +936,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: _col0 string, _col1 string, _c1 bigint
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.x7
+    Create Table
+      columns: _col0 string, _col1 string, _c1 bigint
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.x7
 
   Stage: Stage-2
     Stats Work
@@ -1426,13 +1421,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: _col0 string, _col1 string, _c1 bigint
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.x8
+    Create Table
+      columns: _col0 string, _col1 string, _c1 bigint
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.x8
 
   Stage: Stage-2
     Stats Work
@@ -1615,13 +1609,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: _c0 string, key string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.x9
+    Create Table
+      columns: _c0 string, key string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.x9
 
   Stage: Stage-2
     Stats Work
diff --git a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
index eb3872e..ddc8368 100644
--- a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
+++ b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
@@ -97,13 +97,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-8
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: db1.table_db1
+    Create Table
+      columns: key string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: db1.table_db1
 
   Stage: Stage-2
     Stats Work
diff --git a/ql/src/test/results/clientpositive/db_ddl_explain.q.out b/ql/src/test/results/clientpositive/db_ddl_explain.q.out
new file mode 100644
index 0000000..8e85d75
--- /dev/null
+++ b/ql/src/test/results/clientpositive/db_ddl_explain.q.out
@@ -0,0 +1,171 @@
+PREHOOK: query: EXPLAIN CREATE DATABASE d
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:d
+POSTHOOK: query: EXPLAIN CREATE DATABASE d
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Create Database
+      name: d
+
+PREHOOK: query: CREATE DATABASE d
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:d
+POSTHOOK: query: CREATE DATABASE d
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:d
+PREHOOK: query: EXPLAIN SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: EXPLAIN SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Databases
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+d
+default
+PREHOOK: query: EXPLAIN DESCRIBE DATABASE d
+PREHOOK: type: DESCDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: EXPLAIN DESCRIBE DATABASE d
+POSTHOOK: type: DESCDATABASE
+POSTHOOK: Input: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Describe Database
+      database: d
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: DESCRIBE DATABASE d
+PREHOOK: type: DESCDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: DESCRIBE DATABASE d
+POSTHOOK: type: DESCDATABASE
+POSTHOOK: Input: database:d
+d		location/in/test	hive_test_user	USER	
+PREHOOK: query: EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis')
+PREHOOK: type: ALTERDATABASE
+PREHOOK: Output: database:d
+POSTHOOK: query: EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis')
+POSTHOOK: type: ALTERDATABASE
+POSTHOOK: Output: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Alter Database
+      name: d
+      properties:
+        test yesthisis
+
+PREHOOK: query: ALTER DATABASE d SET dbproperties('test'='yesthisis')
+PREHOOK: type: ALTERDATABASE
+PREHOOK: Output: database:d
+POSTHOOK: query: ALTER DATABASE d SET dbproperties('test'='yesthisis')
+POSTHOOK: type: ALTERDATABASE
+POSTHOOK: Output: database:d
+PREHOOK: query: EXPLAIN SHOW CREATE DATABASE d
+PREHOOK: type: SHOW_CREATEDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: EXPLAIN SHOW CREATE DATABASE d
+POSTHOOK: type: SHOW_CREATEDATABASE
+POSTHOOK: Input: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Create Database
+      database name: d
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SHOW CREATE DATABASE d
+PREHOOK: type: SHOW_CREATEDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: SHOW CREATE DATABASE d
+POSTHOOK: type: SHOW_CREATEDATABASE
+POSTHOOK: Input: database:d
+CREATE DATABASE `d`
+LOCATION
+#### A masked pattern was here ####
+WITH DBPROPERTIES (
+  'test'='yesthisis')
+PREHOOK: query: EXPLAIN USE d
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: EXPLAIN USE d
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Switch Database
+      name: d
+
+PREHOOK: query: USE d
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: USE d
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:d
+PREHOOK: query: EXPLAIN DROP DATABASE d
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:d
+PREHOOK: Output: database:d
+POSTHOOK: query: EXPLAIN DROP DATABASE d
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:d
+POSTHOOK: Output: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop Database
+      database: d
+      if exists: false
+
+PREHOOK: query: DROP DATABASE d
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:d
+PREHOOK: Output: database:d
+POSTHOOK: query: DROP DATABASE d
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:d
+POSTHOOK: Output: database:d
diff --git a/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out b/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out
index 85f4f53..e2c4443 100644
--- a/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out
+++ b/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out
@@ -42,8 +42,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Drop Table Operator:
-        Drop Table
+      Drop Partition Operator:
+        Drop Partition
           table: dmp.mp
 
 PREHOOK: query: alter table dmp.mp drop partition (b='1')
diff --git a/ql/src/test/results/clientpositive/drop_multi_partitions.q.out b/ql/src/test/results/clientpositive/drop_multi_partitions.q.out
index 6b59749..53978e8 100644
--- a/ql/src/test/results/clientpositive/drop_multi_partitions.q.out
+++ b/ql/src/test/results/clientpositive/drop_multi_partitions.q.out
@@ -57,8 +57,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Drop Table Operator:
-        Drop Table
+      Drop Partition Operator:
+        Drop Partition
           table: dmp.mp_n0
 
 PREHOOK: query: alter table dmp.mp_n0 drop partition (b='1')
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out b/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
index d25b350..7506ca8 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
@@ -166,17 +166,16 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: __time timestamp with local time zone, cstring1 string, cstring2 string, cdouble double, cfloat float, ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cboolean1 boolean, cboolean2 boolean
-          storage handler: org.apache.hadoop.hive.druid.DruidStorageHandler
-          name: default.druid_partitioned_table
-          table properties:
-            druid.query.granularity MINUTE
-            druid.segment.granularity HOUR
-            druid.segment.targetShardsPerGranularity 6
-            external.table.purge true
-          isExternal: true
+    Create Table
+      columns: __time timestamp with local time zone, cstring1 string, cstring2 string, cdouble double, cfloat float, ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cboolean1 boolean, cboolean2 boolean
+      storage handler: org.apache.hadoop.hive.druid.DruidStorageHandler
+      name: default.druid_partitioned_table
+      table properties:
+        druid.query.granularity MINUTE
+        druid.segment.granularity HOUR
+        druid.segment.targetShardsPerGranularity 6
+        external.table.purge true
+      isExternal: true
 
   Stage: Stage-3
     Stats Work
@@ -353,8 +352,7 @@ STAGE PLANS:
             COLUMN_STATS_ACCURATE 
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-2
     Tez
@@ -498,8 +496,7 @@ STAGE PLANS:
             COLUMN_STATS_ACCURATE 
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-2
     Tez
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
index 96690af..8bd8809 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
@@ -480,8 +480,7 @@ STAGE PLANS:
     Materialized View Work
 
   Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
+    Pre-Insert task
 
   Stage: Stage-2
     Tez
diff --git a/ql/src/test/results/clientpositive/explain_ddl.q.out b/ql/src/test/results/clientpositive/explain_ddl.q.out
index a71925f..fa98560 100644
--- a/ql/src/test/results/clientpositive/explain_ddl.q.out
+++ b/ql/src/test/results/clientpositive/explain_ddl.q.out
@@ -125,13 +125,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-8
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.M1
+    Create Table
+      columns: key string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.M1
 
   Stage: Stage-2
     Stats Work
@@ -254,13 +253,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-8
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.M1
+    Create Table
+      columns: key string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.M1
 
   Stage: Stage-2
     Stats Work
@@ -387,13 +385,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-8
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.M1
+    Create Table
+      columns: key string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.M1
 
   Stage: Stage-2
     Stats Work
@@ -516,13 +513,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-8
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.V1_n0
+    Create Table
+      columns: key string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.V1_n0
 
   Stage: Stage-2
     Stats Work
@@ -597,13 +593,12 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          default input format: org.apache.hadoop.mapred.TextInputFormat
-          default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          like: src
-          name: default.M1
+    Create Table
+      default input format: org.apache.hadoop.mapred.TextInputFormat
+      default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      like: src
+      name: default.M1
 
 PREHOOK: query: EXPLAIN CREATE TABLE M1 LIKE M1
 PREHOOK: type: CREATETABLE
@@ -618,13 +613,12 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          default input format: org.apache.hadoop.mapred.TextInputFormat
-          default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          like: M1
-          name: default.M1
+    Create Table
+      default input format: org.apache.hadoop.mapred.TextInputFormat
+      default output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      default serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      like: M1
+      name: default.M1
 
 PREHOOK: query: EXPLAIN DROP TABLE M1
 PREHOOK: type: DROPTABLE
@@ -639,9 +633,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Drop Table Operator:
-        Drop Table
-          table: M1
+    Drop Table
+      table: M1
 
 PREHOOK: query: select count(*) from M1 where key > 0
 PREHOOK: type: QUERY
@@ -793,9 +786,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Truncate Table Operator:
-        Truncate Table or Partition
-          TableName: M1
+    Truncate Table or Partition
+      TableName: M1
 
 PREHOOK: query: select count(*) from M1 where key > 0
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out b/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out
index 8610fa0..37555e5 100644
--- a/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out
+++ b/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out
@@ -17,12 +17,11 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: key int, value string
-          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-          output format: org.apache.hadoop.mapred.SequenceFileOutputFormat
-          name: default.dest1_n85
+    Create Table
+      columns: key int, value string
+      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+      output format: org.apache.hadoop.mapred.SequenceFileOutputFormat
+      name: default.dest1_n85
 
 PREHOOK: query: CREATE TABLE dest1_n85(key INT, value STRING) STORED AS
   INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat'
diff --git a/ql/src/test/results/clientpositive/fileformat_text.q.out b/ql/src/test/results/clientpositive/fileformat_text.q.out
index 387f807..6d89660 100644
--- a/ql/src/test/results/clientpositive/fileformat_text.q.out
+++ b/ql/src/test/results/clientpositive/fileformat_text.q.out
@@ -17,12 +17,11 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: key int, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          name: default.dest1_n107
+    Create Table
+      columns: key int, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      name: default.dest1_n107
 
 PREHOOK: query: CREATE TABLE dest1_n107(key INT, value STRING) STORED AS
   INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
diff --git a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
index 432ff08..d6d1472 100644
--- a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
+++ b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
@@ -162,13 +162,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, dummy1 string, dummy2 string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.dummy_n6
+    Create Table
+      columns: key string, dummy1 string, dummy2 string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.dummy_n6
 
   Stage: Stage-2
     Stats Work
diff --git a/ql/src/test/results/clientpositive/input1.q.out b/ql/src/test/results/clientpositive/input1.q.out
index 63f8af0..623b973 100644
--- a/ql/src/test/results/clientpositive/input1.q.out
+++ b/ql/src/test/results/clientpositive/input1.q.out
@@ -20,9 +20,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Describe Table Operator:
-        Describe Table
-          table: TEST1_n6
+    Describe Table
+      table: TEST1_n6
 
   Stage: Stage-1
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/input10.q.out b/ql/src/test/results/clientpositive/input10.q.out
index bbdff6e..977cf33 100644
--- a/ql/src/test/results/clientpositive/input10.q.out
+++ b/ql/src/test/results/clientpositive/input10.q.out
@@ -20,9 +20,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Describe Table Operator:
-        Describe Table
-          table: TEST10
+    Describe Table
+      table: TEST10
 
   Stage: Stage-1
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/input15.q.out b/ql/src/test/results/clientpositive/input15.q.out
index 2dbf6fb..ecfacf2 100644
--- a/ql/src/test/results/clientpositive/input15.q.out
+++ b/ql/src/test/results/clientpositive/input15.q.out
@@ -13,14 +13,13 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: key int, value string
-          field delimiter: 	
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.TEST15
+    Create Table
+      columns: key int, value string
+      field delimiter: 	
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.TEST15
 
 PREHOOK: query: CREATE TABLE TEST15(key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
diff --git a/ql/src/test/results/clientpositive/input2.q.out b/ql/src/test/results/clientpositive/input2.q.out
index aada917..28f7da4 100644
--- a/ql/src/test/results/clientpositive/input2.q.out
+++ b/ql/src/test/results/clientpositive/input2.q.out
@@ -84,10 +84,9 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Show Table Operator:
-        Show Tables
-          database name: default
-          pattern: TEST2*
+    Show Tables
+      database name: default
+      pattern: TEST2*
 
   Stage: Stage-1
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/inputddl1.q.out b/ql/src/test/results/clientpositive/inputddl1.q.out
index a95e9f1..2c51ce1 100644
--- a/ql/src/test/results/clientpositive/inputddl1.q.out
+++ b/ql/src/test/results/clientpositive/inputddl1.q.out
@@ -13,13 +13,12 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: key int, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.INPUTDDL1
+    Create Table
+      columns: key int, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.INPUTDDL1
 
 PREHOOK: query: CREATE TABLE INPUTDDL1(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
diff --git a/ql/src/test/results/clientpositive/inputddl2.q.out b/ql/src/test/results/clientpositive/inputddl2.q.out
index a5ec1c9..b6cfbd6 100644
--- a/ql/src/test/results/clientpositive/inputddl2.q.out
+++ b/ql/src/test/results/clientpositive/inputddl2.q.out
@@ -13,14 +13,13 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: key int, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          partition columns: ds string, country string
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.INPUTDDL2
+    Create Table
+      columns: key int, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      partition columns: ds string, country string
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.INPUTDDL2
 
 PREHOOK: query: CREATE TABLE INPUTDDL2(key INT, value STRING) PARTITIONED BY(ds STRING, country STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
diff --git a/ql/src/test/results/clientpositive/inputddl3.q.out b/ql/src/test/results/clientpositive/inputddl3.q.out
index 639f095..c57ca60 100644
--- a/ql/src/test/results/clientpositive/inputddl3.q.out
+++ b/ql/src/test/results/clientpositive/inputddl3.q.out
@@ -13,14 +13,13 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Create Table Operator:
-        Create Table
-          columns: key int, value string
-          field delimiter: 	
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.INPUTDDL3
+    Create Table
+      columns: key int, value string
+      field delimiter: 	
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.INPUTDDL3
 
 PREHOOK: query: CREATE TABLE INPUTDDL3(key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
diff --git a/ql/src/test/results/clientpositive/inputddl6.q.out b/ql/src/test/results/clientpositive/inputddl6.q.out
index e14807c..47cc61f 100644
--- a/ql/src/test/results/clientpositive/inputddl6.q.out
+++ b/ql/src/test/results/clientpositive/inputddl6.q.out
@@ -91,11 +91,10 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-0
-      Describe Table Operator:
-        Describe Table
-          partition:
-            ds 2008-04-09
-          table: INPUTDDL6
+    Describe Table
+      partition:
+        ds 2008-04-09
+      table: INPUTDDL6
 
   Stage: Stage-1
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/llap/ctas.q.out b/ql/src/test/results/clientpositive/llap/ctas.q.out
index c761b9d..c42a494 100644
--- a/ql/src/test/results/clientpositive/llap/ctas.q.out
+++ b/ql/src/test/results/clientpositive/llap/ctas.q.out
@@ -123,13 +123,12 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: k string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.nzhang_CTAS1
+    Create Table
+      columns: k string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.nzhang_CTAS1
 
   Stage: Stage-3
     Stats Work
@@ -319,13 +318,12 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.nzhang_ctas2
+    Create Table
+      columns: key string, value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.nzhang_ctas2
 
   Stage: Stage-3
     Stats Work
@@ -515,13 +513,12 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: half_key double, conb string
-          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          name: default.nzhang_ctas3
+    Create Table
+      columns: half_key double, conb string
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+      name: default.nzhang_ctas3
 
   Stage: Stage-3
     Stats Work
@@ -775,14 +772,13 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          field delimiter: ,
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.nzhang_ctas4
+    Create Table
+      columns: key string, value string
+      field delimiter: ,
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.nzhang_ctas4
 
   Stage: Stage-3
     Stats Work
@@ -973,16 +969,15 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          field delimiter: ,
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          line delimiter: 
+    Create Table
+      columns: key string, value string
+      field delimiter: ,
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      line delimiter: 
 
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.nzhang_ctas5
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.nzhang_ctas5
 
   Stage: Stage-3
     Stats Work
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
index adf8011..7edfc38 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
@@ -142,13 +142,12 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: ds string, date string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.srcpart_date_n2
+    Create Table
+      columns: ds string, date string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.srcpart_date_n2
 
   Stage: Stage-3
     Stats Work
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
index 1ee459b..1ea8fdc 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
@@ -7,8 +7,7 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_orc_merge_test_part_n1
 Stage-0
-  Create Table Operator:
-    name:default.src_orc_merge_test_part_n1
+  Create Table{"name:":"default.src_orc_merge_test_part_n1"}
 
 PREHOOK: query: create table src_orc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as orc
 PREHOOK: type: CREATETABLE
@@ -3350,8 +3349,7 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@abcd_n1
 Stage-0
-  Create Table Operator:
-    name:default.abcd_n1
+  Create Table{"name:":"default.abcd_n1"}
 
 PREHOOK: query: create table abcd_n1 (a int, b int, c int, d int)
 PREHOOK: type: CREATETABLE
@@ -3438,8 +3436,7 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_rc_merge_test_n0
 Stage-0
-  Create Table Operator:
-    name:default.src_rc_merge_test_n0
+  Create Table{"name:":"default.src_rc_merge_test_n0"}
 
 PREHOOK: query: create table src_rc_merge_test_n0(key int, value string) stored as rcfile
 PREHOOK: type: CREATETABLE
@@ -3466,8 +3463,7 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tgt_rc_merge_test_n0
 Stage-0
-  Create Table Operator:
-    name:default.tgt_rc_merge_test_n0
+  Create Table{"name:":"default.tgt_rc_merge_test_n0"}
 
 PREHOOK: query: create table tgt_rc_merge_test_n0(key int, value string) stored as rcfile
 PREHOOK: type: CREATETABLE
@@ -3697,8 +3693,7 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_Tmp_n1
 Stage-0
-  Create Table Operator:
-    name:default.nzhang_Tmp_n1
+  Create Table{"name:":"default.nzhang_Tmp_n1"}
 
 PREHOOK: query: create table nzhang_Tmp_n1(a int, b string)
 PREHOOK: type: CREATETABLE
@@ -3728,8 +3723,7 @@ Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
 Stage-3
   Stats Work{}
     Stage-4
-      Create Table Operator:
-        name:default.nzhang_CTAS1_n1
+      Create Table{"name:":"default.nzhang_CTAS1_n1"}
         Stage-0
           Move Operator
             Stage-1
@@ -3796,8 +3790,7 @@ Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
 Stage-3
   Stats Work{}
     Stage-4
-      Create Table Operator:
-        name:default.nzhang_ctas3_n1
+      Create Table{"name:":"default.nzhang_ctas3_n1"}
         Stage-0
           Move Operator
             Stage-1
@@ -3862,8 +3855,7 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_dtt
 Stage-0
-  Create Table Operator:
-    name:default.acid_dtt
+  Create Table{"name:":"default.acid_dtt"}
 
 PREHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
diff --git a/ql/src/test/results/clientpositive/llap/partition_ctas.q.out b/ql/src/test/results/clientpositive/llap/partition_ctas.q.out
index 3e290b3..d0a706c 100644
--- a/ql/src/test/results/clientpositive/llap/partition_ctas.q.out
+++ b/ql/src/test/results/clientpositive/llap/partition_ctas.q.out
@@ -92,14 +92,13 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          partition columns: key string
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.partition_ctas_1
+    Create Table
+      columns: value string
+      input format: org.apache.hadoop.mapred.TextInputFormat
+      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+      partition columns: key string
+      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      name: default.partition_ctas_1
 
   Stage: Stage-0
     Move Operator
diff --git a/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out b/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out
index 4f1a479..298f7f3 100644
--- a/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out
+++ b/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out
@@ -129,13 +129,12 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key int, value string, part int
-          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          name: default.rcfile_createas1b
+    Create Table
+      columns: key int, value string, part int
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+      serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+      name: default.rcfile_createas1b
 
   Stage: Stage-3
     Stats Work
diff --git a/ql/src/test/results/clientpositive/llap/semijoin_reddedup.q.out b/ql/src/test/results/clientpositive/llap/semijoin_reddedup.q.out
index 1fca347..01d2bcc 100644
--- a/ql/src/test/results/clientpositive/llap/semijoin_reddedup.q.out
+++ b/ql/src/test/results/clientpositive/llap/semijoin_reddedup.q.out
@@ -496,16 +496,15 @@ STAGE PLANS:
     Dependency Collection
 
   Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: c_name string, c_custkey bigint, o_orderkey bigint, o_orderdate string, o_totalprice double, _c5 double
-          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-          serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-          name: tpch_test.q18_large_volume_customer_cached
-          table properties:
-            transactional true
-            transactional_properties default
+    Create Table
+      columns: c_name string, c_custkey bigint, o_orderkey bigint, o_orderdate string, o_totalprice double, _c5 double
+      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+      serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+      name: tpch_test.q18_large_volume_customer_cached
+      table properties:
+        transactional true
+        transactional_properties default
 
   Stage: Stage-3
     Stats Work
diff --git a/ql/src/test/results/clientpositive/llap/temp_table.q.out b/ql/src/test/results/clientpositive/llap/temp_table.q.out
index 45be750..af147bf 100644
--- a/ql/src/test/results/clientpositive/llap/temp_table.q.out
... 1457 lines suppressed ...