You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by we...@apache.org on 2016/10/27 21:26:38 UTC
hive git commit: HIVE-15087 : integrate MM tables into ACID: replace
hivecommit property with ACID property (Wei Zheng)
Repository: hive
Updated Branches:
refs/heads/hive-14535 b143f5ce3 -> c587404d4
HIVE-15087 : integrate MM tables into ACID: replace hivecommit property with ACID property (Wei Zheng)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c587404d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c587404d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c587404d
Branch: refs/heads/hive-14535
Commit: c587404d4d2f0995af822aa6871d4aadfbf6aeda
Parents: b143f5c
Author: Wei Zheng <we...@apache.org>
Authored: Thu Oct 27 14:26:25 2016 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Thu Oct 27 14:26:25 2016 -0700
----------------------------------------------------------------------
metastore/if/hive_metastore.thrift | 2 -
.../thrift/gen-cpp/hive_metastore_constants.cpp | 2 -
.../thrift/gen-cpp/hive_metastore_constants.h | 1 -
.../metastore/api/hive_metastoreConstants.java | 2 -
.../src/gen/thrift/gen-php/metastore/Types.php | 5 -
.../thrift/gen-py/hive_metastore/constants.py | 1 -
.../thrift/gen-rb/hive_metastore_constants.rb | 2 -
.../hadoop/hive/metastore/MetaStoreUtils.java | 26 ++---
.../hadoop/hive/metastore/ObjectStore.java | 2 +-
.../hadoop/hive/metastore/TestObjectStore.java | 3 +-
.../java/org/apache/hadoop/hive/ql/Driver.java | 2 +-
.../apache/hadoop/hive/ql/exec/MoveTask.java | 6 +-
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 11 --
.../hive/ql/io/CombineHiveInputFormat.java | 2 +-
.../apache/hadoop/hive/ql/metadata/Hive.java | 2 +-
.../hadoop/hive/ql/optimizer/SamplePruner.java | 2 +-
.../hive/ql/parse/DDLSemanticAnalyzer.java | 4 +-
.../hive/ql/parse/ImportSemanticAnalyzer.java | 4 +-
.../hive/ql/parse/LoadSemanticAnalyzer.java | 2 +-
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 13 +--
.../queries/clientnegative/mm_concatenate.q | 5 +-
ql/src/test/queries/clientpositive/mm_all.q | 54 +++++-----
ql/src/test/queries/clientpositive/mm_all2.q | 8 +-
ql/src/test/queries/clientpositive/mm_current.q | 8 +-
.../results/clientnegative/mm_concatenate.q.out | 4 +-
.../results/clientpositive/llap/mm_all.q.out | 104 +++++++++----------
.../results/clientpositive/llap/mm_all2.q.out | 12 +--
.../clientpositive/llap/mm_current.q.out | 12 +--
28 files changed, 144 insertions(+), 157 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 01a919b..8d06010 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -1524,7 +1524,5 @@ const string META_TABLE_STORAGE = "storage_handler",
const string TABLE_IS_TRANSACTIONAL = "transactional",
const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction",
const string TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties",
-const string TABLE_IS_MM = "hivecommit",
-
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
index ccc61cb..1cbd176 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
@@ -59,8 +59,6 @@ hive_metastoreConstants::hive_metastoreConstants() {
TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties";
- TABLE_IS_MM = "hivecommit";
-
}
}}} // namespace
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h
index 92a2116..3d068c3 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h
@@ -39,7 +39,6 @@ class hive_metastoreConstants {
std::string TABLE_IS_TRANSACTIONAL;
std::string TABLE_NO_AUTO_COMPACT;
std::string TABLE_TRANSACTIONAL_PROPERTIES;
- std::string TABLE_IS_MM;
};
extern const hive_metastoreConstants g_hive_metastore_constants;
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
index 2503d18..8de8896 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
@@ -84,6 +84,4 @@ public class hive_metastoreConstants {
public static final String TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties";
- public static final String TABLE_IS_MM = "hivecommit";
-
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index e4eea39..9dd14d7 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -19787,7 +19787,6 @@ final class Constant extends \Thrift\Type\TConstant {
static protected $TABLE_IS_TRANSACTIONAL;
static protected $TABLE_NO_AUTO_COMPACT;
static protected $TABLE_TRANSACTIONAL_PROPERTIES;
- static protected $TABLE_IS_MM;
static protected function init_DDL_TIME() {
return "transient_lastDdlTime";
@@ -19884,10 +19883,6 @@ final class Constant extends \Thrift\Type\TConstant {
static protected function init_TABLE_TRANSACTIONAL_PROPERTIES() {
return "transactional_properties";
}
-
- static protected function init_TABLE_IS_MM() {
- return "hivecommit";
- }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py
index 6232737..5100236 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py
@@ -33,4 +33,3 @@ META_TABLE_STORAGE = "storage_handler"
TABLE_IS_TRANSACTIONAL = "transactional"
TABLE_NO_AUTO_COMPACT = "no_auto_compaction"
TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties"
-TABLE_IS_MM = "hivecommit"
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb
index 118a54e..6aa7143 100644
--- a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb
+++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb
@@ -55,5 +55,3 @@ TABLE_NO_AUTO_COMPACT = %q"no_auto_compaction"
TABLE_TRANSACTIONAL_PROPERTIES = %q"transactional_properties"
-TABLE_IS_MM = %q"hivecommit"
-
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 28fcfa8..3ee1f1c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -1885,19 +1885,19 @@ public class MetaStoreUtils {
csNew.setStatsObj(list);
}
- public static boolean isMmTable(Table table) {
- return isMmTable(table.getParameters());
- }
-
- public static boolean isMmTable(Map<String, String> params) {
- // TODO: perhaps it should be a 3rd value for 'transactional'?
- String value = params.get(hive_metastoreConstants.TABLE_IS_MM);
- return value != null && value.equalsIgnoreCase("true");
+ // TODO The following two utility methods can be moved to AcidUtils once no class in metastore is relying on them,
+ // right now ObjectStore.getAllMmTablesForCleanup is calling these methods
+ /**
+ * Checks if a table is an ACID table that only supports INSERT, but not UPDATE/DELETE
+ * @param params table properties
+ * @return true if table is an INSERT_ONLY table, false otherwise
+ */
+ public static boolean isInsertOnlyTable(Map<String, String> params) {
+ String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+ return transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp);
}
-
- public static boolean isMmTable(Properties params) {
- // TODO: perhaps it should be a 3rd value for 'transactional'?
- String value = params.getProperty(hive_metastoreConstants.TABLE_IS_MM);
- return value != null && value.equalsIgnoreCase("true");
+ public static boolean isInsertOnlyTable(Properties params) {
+ String transactionalProp = params.getProperty(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+ return transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index c679b35..3d97cbf 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -8916,7 +8916,7 @@ public class ObjectStore implements RawStore, Configurable {
pm.retrieveAll(tables);
ArrayList<FullTableName> result = new ArrayList<>(tables.size());
for (MTable table : tables) {
- if (MetaStoreUtils.isMmTable(table.getParameters())) {
+ if (MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
result.add(new FullTableName(table.getDatabase().getName(), table.getTableName()));
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
index a8d3495..aa9fa61 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -216,7 +216,8 @@ public class TestObjectStore {
StorageDescriptor sd = createFakeSd("mock:/foo");
HashMap<String,String> params = new HashMap<String,String>();
params.put("EXTERNAL", "false");
- params.put(hive_metastoreConstants.TABLE_IS_MM, "true");
+ params.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
+ params.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "insert_only");
Table tbl = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd,
null, params, null, null, "MANAGED_TABLE");
objectStore.createTable(tbl);
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 690cdff..19f743b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1603,7 +1603,7 @@ public class Driver implements CommandProcessor {
default: return null;
}
return (t != null && !t.isTemporary()
- && MetaStoreUtils.isMmTable(t.getParameters())) ? t : null;
+ && MetaStoreUtils.isInsertOnlyTable(t.getParameters())) ? t : null;
}
private CommandProcessorResponse rollback(CommandProcessorResponse cpr) {
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 6e3ba98..22843c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -316,7 +316,8 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
checkFileFormats(db, tbd, table);
- boolean isAcid = work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID;
+ boolean isAcid = work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
+ work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY;
if (tbd.isMmTable() && isAcid) {
throw new HiveException("ACID and MM are not supported");
}
@@ -442,7 +443,8 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
tbd.getReplace(),
dpCtx.getNumDPCols(),
(tbd.getLbCtx() == null) ? 0 : tbd.getLbCtx().calculateListBucketingLevel(),
- work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
+ work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
+ work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY,
SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(),
work.getLoadTableWork().getWriteType(),
tbd.getMmWriteId());
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index ecbc216..2dfbc8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1109,17 +1109,6 @@ public class AcidUtils {
}
/**
- * Checks if a table is an ACID table that only supports INSERT, but not UPDATE/DELETE
- * @param table table
- * @return true if table is an INSERT_ONLY table, false otherwise
- */
- public static boolean isInsertOnlyTable(Table table) {
- String transactionalProp = table.getProperty(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
- return transactionalProp != null &&
- AcidUtils.AcidOperationalProperties.INSERT_ONLY_STRING.equals(transactionalProp);
- }
-
- /**
* Sets the acidOperationalProperties in the configuration object argument.
* @param conf Mutable configuration object
* @param properties An acidOperationalProperties object to initialize from.
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
index 59d6142..cc1de11 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
@@ -107,7 +107,7 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
getInputFormatFromCache(inputFormatClass, conf);
boolean isAvoidSplitCombine = inputFormat instanceof AvoidSplitCombination &&
((AvoidSplitCombination) inputFormat).shouldSkipCombine(paths[i + start], conf);
- boolean isMmTable = MetaStoreUtils.isMmTable(part.getTableDesc().getProperties());
+ boolean isMmTable = MetaStoreUtils.isInsertOnlyTable(part.getTableDesc().getProperties());
if (isAvoidSplitCombine || isMmTable) {
if (LOG.isDebugEnabled()) {
LOG.debug("The path [" + paths[i + start] +
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index c7ac452..4a8df95 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1506,7 +1506,7 @@ public class Hive {
throws HiveException {
Table tbl = getTable(tableName);
boolean isMmTableWrite = (mmWriteId != null);
- Preconditions.checkState(isMmTableWrite == MetaStoreUtils.isMmTable(tbl.getParameters()));
+ Preconditions.checkState(isMmTableWrite == MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()));
loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs,
isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, mmWriteId);
if (isMmTableWrite && isCommitMmWrite) {
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
index 2ad1f1c..9d2e031 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
@@ -192,7 +192,7 @@ public class SamplePruner extends Transform {
// check if input pruning is possible
// TODO: this relies a lot on having one file per bucket. No support for MM tables for now.
- boolean isMmTable = MetaStoreUtils.isMmTable(part.getTable().getParameters());
+ boolean isMmTable = MetaStoreUtils.isInsertOnlyTable(part.getTable().getParameters());
if (sampleDescr.getInputPruning() && !isMmTable) {
LOG.trace("numerator = " + num);
LOG.trace("denominator = " + den);
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 038cbbf..3e016f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -964,7 +964,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
}
// It would be possible to support this, but this is such a pointless command.
- if (MetaStoreUtils.isMmTable(table.getParameters())) {
+ if (MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
throw new SemanticException("Truncating MM table columns not presently supported");
}
@@ -1590,7 +1590,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
try {
tblObj = getTable(tableName);
// TODO: we should probably block all ACID tables here.
- if (MetaStoreUtils.isMmTable(tblObj.getParameters())) {
+ if (MetaStoreUtils.isInsertOnlyTable(tblObj.getParameters())) {
throw new SemanticException("Merge is not supported for MM tables");
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 2a525e7..87b85c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -163,7 +163,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
// Create table associated with the import
// Executed if relevant, and used to contain all the other details about the table if not.
CreateTableDesc tblDesc = getBaseCreateTableDescFromTable(dbname, rv.getTable());
- boolean isSourceMm = MetaStoreUtils.isMmTable(tblDesc.getTblProps());
+ boolean isSourceMm = MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps());
if (isExternalSet) {
if (isSourceMm) {
@@ -233,7 +233,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
}
Long mmWriteId = null;
- if (table != null && MetaStoreUtils.isMmTable(table.getParameters())) {
+ if (table != null && MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
mmWriteId = db.getNextTableWriteId(table.getDbName(), table.getTableName());
} else if (table == null && isSourceMm) {
// We could import everything as is - directories and IDs, but that won't work with ACID
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 26274f5..bc6ef13 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -263,7 +263,7 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
Long mmWriteId = null;
Table tbl = ts.tableHandle;
- if (MetaStoreUtils.isMmTable(tbl.getParameters())) {
+ if (MetaStoreUtils.isInsertOnlyTable(tbl.getParameters())) {
try {
mmWriteId = db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName());
} catch (HiveException e) {
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 36c9049..35cfcd9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6575,7 +6575,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
boolean isNonNativeTable = dest_tab.isNonNative();
- isMmTable = MetaStoreUtils.isMmTable(dest_tab.getParameters());
+ isMmTable = MetaStoreUtils.isInsertOnlyTable(dest_tab.getParameters());
if (isNonNativeTable || isMmTable) {
queryTmpdir = dest_path;
} else {
@@ -6647,7 +6647,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
.getAuthority(), partPath.toUri().getPath());
- isMmTable = MetaStoreUtils.isMmTable(dest_tab.getParameters());
+ isMmTable = MetaStoreUtils.isInsertOnlyTable(dest_tab.getParameters());
queryTmpdir = isMmTable ? dest_path : ctx.getTempDirForPath(dest_path);
Utilities.LOG14535.info("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + dest_path);
table_desc = Utilities.getTableDesc(dest_tab);
@@ -6702,7 +6702,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
field_schemas = new ArrayList<FieldSchema>();
destTableIsTemporary = tblDesc.isTemporary();
destTableIsMaterialization = tblDesc.isMaterialization();
- if (MetaStoreUtils.isMmTable(tblDesc.getTblProps())) {
+ if (MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps())) {
isMmTable = isMmCtas = true;
// TODO# this should really get current ACID txn; assuming ACID works correctly the txn
// should have been opened to create the ACID table. For now use the first ID.
@@ -6961,7 +6961,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
fileSinkDesc.setHiveServerQuery(SessionState.get().isHiveServerQuery());
// If this is an insert, update, or delete on an ACID table then mark that so the
// FileSinkOperator knows how to properly write to it.
- if (destTableIsAcid && !AcidUtils.isInsertOnlyTable(dest_part.getTable())) {
+ if (destTableIsAcid && dest_part != null && dest_part.getTable() != null &&
+ !MetaStoreUtils.isInsertOnlyTable(dest_part.getTable().getParameters())) {
AcidUtils.Operation wt = updating() ? AcidUtils.Operation.UPDATE :
(deleting() ? AcidUtils.Operation.DELETE : AcidUtils.Operation.INSERT);
fileSinkDesc.setWriteType(wt);
@@ -7159,7 +7160,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private void checkAcidConstraints(QB qb, TableDesc tableDesc,
Table table, AcidUtils.Operation acidOp) throws SemanticException {
String tableName = tableDesc.getTableName();
- if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
+ if (!qb.getParseInfo().isInsertIntoTable(tableName) && !Operation.INSERT_ONLY.equals(acidOp)) {
LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getMsg());
}
@@ -13139,7 +13140,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private AcidUtils.Operation getAcidType(Table table, Class<? extends OutputFormat> of) {
if (SessionState.get() == null || !SessionState.get().getTxnMgr().supportsAcid()) {
return AcidUtils.Operation.NOT_ACID;
- } else if (AcidUtils.isInsertOnlyTable(table)) {
+ } else if (MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
return AcidUtils.Operation.INSERT_ONLY;
} else if (isAcidOutputFormat(of)) {
return getAcidType();
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/test/queries/clientnegative/mm_concatenate.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/mm_concatenate.q b/ql/src/test/queries/clientnegative/mm_concatenate.q
index c580767..4b13c60 100644
--- a/ql/src/test/queries/clientnegative/mm_concatenate.q
+++ b/ql/src/test/queries/clientnegative/mm_concatenate.q
@@ -1,4 +1,7 @@
-create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true');
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table concat_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into table concat_mm select key from src limit 10;
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/test/queries/clientpositive/mm_all.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q
index 9d1bf8a..0f2c96d 100644
--- a/ql/src/test/queries/clientpositive/mm_all.q
+++ b/ql/src/test/queries/clientpositive/mm_all.q
@@ -4,6 +4,8 @@ set hive.fetch.task.conversion=none;
set tez.grouping.min-size=1;
set tez.grouping.max-size=2;
set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-- Force multiple writers when reading
@@ -15,7 +17,7 @@ insert into table intermediate partition(p='457') select distinct key from src w
drop table part_mm;
-create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true');
+create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
explain insert into table part_mm partition(key_mm='455') select key from intermediate;
insert into table part_mm partition(key_mm='455') select key from intermediate;
insert into table part_mm partition(key_mm='456') select key from intermediate;
@@ -28,7 +30,7 @@ select * from part_mm order by key, key_mm;
drop table part_mm;
drop table simple_mm;
-create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true');
+create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
insert into table simple_mm select key from intermediate;
insert overwrite table simple_mm select key from intermediate;
select * from simple_mm order by key;
@@ -49,7 +51,7 @@ set hive.merge.sparkfiles=false;
set hive.merge.tezfiles=false;
create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc
- tblproperties ('hivecommit'='true');
+ tblproperties ("transactional"="true", "transactional_properties"="insert_only");
insert into table dp_mm partition (key1='123', key2) select key, key from intermediate;
@@ -60,7 +62,7 @@ drop table dp_mm;
-- union
-create table union_mm(id int) tblproperties ('hivecommit'='true');
+create table union_mm(id int) tblproperties ("transactional"="true", "transactional_properties"="insert_only");
insert into table union_mm
select temps.p from (
select key as p from intermediate
@@ -103,7 +105,7 @@ select * from union_mm order by id;
drop table union_mm;
-create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true');
+create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only");
insert into table partunion_mm partition(key)
select temps.* from (
select key as p, key from intermediate
@@ -116,7 +118,7 @@ drop table partunion_mm;
create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
- stored as directories tblproperties ('hivecommit'='true');
+ stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only");
insert into table skew_mm
select key, key, key from intermediate;
@@ -126,7 +128,7 @@ drop table skew_mm;
create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int)
-skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true');
+skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only");
insert into table skew_dp_union_mm partition (k3)
select key as i, key as j, key as k, key as l from intermediate
@@ -145,7 +147,7 @@ set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
-create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true');
+create table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into table merge0_mm select key from intermediate;
select * from merge0_mm;
@@ -158,7 +160,7 @@ select * from merge0_mm;
drop table merge0_mm;
-create table merge2_mm (id int) tblproperties('hivecommit'='true');
+create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into table merge2_mm select key from intermediate;
select * from merge2_mm;
@@ -171,7 +173,7 @@ select * from merge2_mm;
drop table merge2_mm;
-create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true');
+create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into table merge1_mm partition (key) select key, key from intermediate;
select * from merge1_mm;
@@ -191,12 +193,12 @@ set hive.merge.mapredfiles=false;
drop table ctas0_mm;
-create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate;
+create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate;
select * from ctas0_mm;
drop table ctas0_mm;
drop table ctas1_mm;
-create table ctas1_mm tblproperties ('hivecommit'='true') as
+create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
select * from intermediate union all select * from intermediate;
select * from ctas1_mm;
drop table ctas1_mm;
@@ -204,7 +206,7 @@ drop table ctas1_mm;
drop table iow0_mm;
-create table iow0_mm(key int) tblproperties('hivecommit'='true');
+create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert overwrite table iow0_mm select key from intermediate;
insert into table iow0_mm select key + 1 from intermediate;
select * from iow0_mm order by key;
@@ -214,7 +216,7 @@ drop table iow0_mm;
drop table iow1_mm;
-create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true');
+create table iow1_mm(key int) partitioned by (key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert overwrite table iow1_mm partition (key2)
select key as k1, key from intermediate union all select key as k1, key from intermediate;
insert into table iow1_mm partition (key2)
@@ -232,7 +234,7 @@ drop table iow1_mm;
drop table load0_mm;
-create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true');
+create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only");
load data local inpath '../../data/files/kv1.txt' into table load0_mm;
select count(1) from load0_mm;
load data local inpath '../../data/files/kv2.txt' into table load0_mm;
@@ -250,7 +252,7 @@ load data local inpath '../../data/files/kv2.txt' into table intermediate2;
load data local inpath '../../data/files/kv3.txt' into table intermediate2;
drop table load1_mm;
-create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true');
+create table load1_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only");
load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' into table load1_mm;
load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv1.txt' into table load1_mm;
select count(1) from load1_mm;
@@ -266,7 +268,7 @@ drop table load1_mm;
drop table load2_mm;
create table load2_mm (key string, value string)
- partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true');
+ partitioned by (k int, l int) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only");
load data local inpath '../../data/files/kv1.txt' into table intermediate2;
load data local inpath '../../data/files/kv2.txt' into table intermediate2;
load data local inpath '../../data/files/kv3.txt' into table intermediate2;
@@ -281,9 +283,9 @@ drop table intermmediate_part;
drop table intermmediate_nonpart;
create table intermediate_nonpart(key int, p int);
insert into intermediate_nonpart select * from intermediate;
-create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true');
+create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into intermmediate_nonpart select * from intermediate;
-create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true');
+create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into table intermmediate partition(p) select key, p from intermediate;
set hive.exim.test.mode=true;
@@ -300,7 +302,7 @@ drop table intermmediate_nonpart;
-- non-MM export to MM table, with and without partitions
drop table import0_mm;
-create table import0_mm(key int, p int) tblproperties('hivecommit'='true');
+create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
import table import0_mm from 'ql/test/data/exports/intermediate_nonpart';
select * from import0_mm order by key, p;
drop table import0_mm;
@@ -309,7 +311,7 @@ drop table import0_mm;
drop table import1_mm;
create table import1_mm(key int) partitioned by (p int)
- stored as orc tblproperties('hivecommit'='true');
+ stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
import table import1_mm from 'ql/test/data/exports/intermediate_part';
select * from import1_mm order by key, p;
drop table import1_mm;
@@ -332,13 +334,13 @@ drop table import3_mm;
-- MM export into existing MM table, non-part and partial part
drop table import4_mm;
-create table import4_mm(key int, p int) tblproperties('hivecommit'='true');
+create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart';
select * from import4_mm order by key, p;
drop table import4_mm;
drop table import5_mm;
-create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true');
+create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part';
select * from import5_mm order by key, p;
drop table import5_mm;
@@ -363,8 +365,8 @@ set hive.exim.test.mode=false;
drop table multi0_1_mm;
drop table multi0_2_mm;
-create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true');
-create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true');
+create table multi0_1_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+create table multi0_2_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
from intermediate
insert overwrite table multi0_1_mm select key, p
@@ -392,7 +394,7 @@ drop table multi0_2_mm;
drop table multi1_mm;
-create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true');
+create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
from intermediate
insert into table multi1_mm partition(p=1) select p, key
insert into table multi1_mm partition(p=2) select key, p;
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/test/queries/clientpositive/mm_all2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_all2.q b/ql/src/test/queries/clientpositive/mm_all2.q
index c4f3058..f3d0f07 100644
--- a/ql/src/test/queries/clientpositive/mm_all2.q
+++ b/ql/src/test/queries/clientpositive/mm_all2.q
@@ -4,6 +4,8 @@ set hive.fetch.task.conversion=none;
set tez.grouping.min-size=1;
set tez.grouping.max-size=2;
set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-- Bucketing tests are slow and some tablesample ones don't work w/o MM
@@ -20,7 +22,7 @@ insert into table intermediate partition(p='457') select distinct key from src w
drop table bucket0_mm;
create table bucket0_mm(key int, id int)
clustered by (key) into 2 buckets
-tblproperties('hivecommit'='true');
+tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into table bucket0_mm select key, key from intermediate;
select * from bucket0_mm;
select * from bucket0_mm tablesample (bucket 1 out of 2) s;
@@ -35,7 +37,7 @@ drop table bucket0_mm;
drop table bucket1_mm;
create table bucket1_mm(key int, id int) partitioned by (key2 int)
clustered by (key) sorted by (key) into 2 buckets
-tblproperties('hivecommit'='true');
+tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into table bucket1_mm partition (key2)
select key + 1, key, key - 1 from intermediate
union all
@@ -50,7 +52,7 @@ drop table bucket1_mm;
drop table bucket2_mm;
create table bucket2_mm(key int, id int)
clustered by (key) into 10 buckets
-tblproperties('hivecommit'='true');
+tblproperties("transactional"="true", "transactional_properties"="insert_only");
insert into table bucket2_mm select key, key from intermediate where key == 0;
select * from bucket2_mm;
select * from bucket2_mm tablesample (bucket 1 out of 10) s;
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/test/queries/clientpositive/mm_current.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q
index bb166cf..93103d3 100644
--- a/ql/src/test/queries/clientpositive/mm_current.q
+++ b/ql/src/test/queries/clientpositive/mm_current.q
@@ -5,6 +5,8 @@ set hive.fetch.task.conversion=none;
set tez.grouping.min-size=1;
set tez.grouping.max-size=2;
set hive.tez.auto.reducer.parallelism=false;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
drop table intermediate;
create table intermediate(key int) partitioned by (p int) stored as orc;
@@ -14,8 +16,8 @@ insert into table intermediate partition(p='456') select distinct key from src w
drop table multi0_1_mm;
drop table multi0_2_mm;
-create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true');
-create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true');
+create table multi0_1_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+create table multi0_2_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
from intermediate
insert overwrite table multi0_1_mm select key, p
@@ -43,7 +45,7 @@ drop table multi0_2_mm;
drop table multi1_mm;
-create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true');
+create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
from intermediate
insert into table multi1_mm partition(p=1) select p, key
insert into table multi1_mm partition(p=2) select key, p;
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/test/results/clientnegative/mm_concatenate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/mm_concatenate.q.out b/ql/src/test/results/clientnegative/mm_concatenate.q.out
index 0736409..5c004b9 100644
--- a/ql/src/test/results/clientnegative/mm_concatenate.q.out
+++ b/ql/src/test/results/clientnegative/mm_concatenate.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true')
+PREHOOK: query: create table concat_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@concat_mm
-POSTHOOK: query: create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true')
+POSTHOOK: query: create table concat_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@concat_mm
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/test/results/clientpositive/llap/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out
index 57c878c..1ba6dce 100644
--- a/ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -43,11 +43,11 @@ PREHOOK: query: drop table part_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table part_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true')
+PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@part_mm
-POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true')
+POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@part_mm
@@ -236,11 +236,11 @@ PREHOOK: query: drop table simple_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table simple_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true')
+PREHOOK: query: create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@simple_mm
-POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true')
+POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@simple_mm
@@ -352,12 +352,12 @@ POSTHOOK: query: -- simple DP (no bucketing)
drop table dp_mm
POSTHOOK: type: DROPTABLE
PREHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc
- tblproperties ('hivecommit'='true')
+ tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@dp_mm
POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc
- tblproperties ('hivecommit'='true')
+ tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@dp_mm
@@ -422,13 +422,13 @@ POSTHOOK: Input: default@dp_mm
POSTHOOK: Output: default@dp_mm
PREHOOK: query: -- union
-create table union_mm(id int) tblproperties ('hivecommit'='true')
+create table union_mm(id int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@union_mm
POSTHOOK: query: -- union
-create table union_mm(id int) tblproperties ('hivecommit'='true')
+create table union_mm(id int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@union_mm
@@ -648,11 +648,11 @@ POSTHOOK: query: drop table union_mm
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@union_mm
POSTHOOK: Output: default@union_mm
-PREHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true')
+PREHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@partunion_mm
-POSTHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true')
+POSTHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@partunion_mm
@@ -750,12 +750,12 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@partunion_mm
POSTHOOK: Output: default@partunion_mm
PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
- stored as directories tblproperties ('hivecommit'='true')
+ stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@skew_mm
POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
- stored as directories tblproperties ('hivecommit'='true')
+ stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@skew_mm
@@ -801,12 +801,12 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@skew_mm
POSTHOOK: Output: default@skew_mm
PREHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int)
-skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true')
+skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@skew_dp_union_mm
POSTHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int)
-skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true')
+skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@skew_dp_union_mm
@@ -929,11 +929,11 @@ POSTHOOK: query: drop table skew_dp_union_mm
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@skew_dp_union_mm
POSTHOOK: Output: default@skew_dp_union_mm
-PREHOOK: query: create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true')
+PREHOOK: query: create table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@merge0_mm
-POSTHOOK: query: create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true')
+POSTHOOK: query: create table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@merge0_mm
@@ -1009,11 +1009,11 @@ POSTHOOK: query: drop table merge0_mm
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@merge0_mm
POSTHOOK: Output: default@merge0_mm
-PREHOOK: query: create table merge2_mm (id int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@merge2_mm
-POSTHOOK: query: create table merge2_mm (id int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@merge2_mm
@@ -1089,11 +1089,11 @@ POSTHOOK: query: drop table merge2_mm
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@merge2_mm
POSTHOOK: Output: default@merge2_mm
-PREHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true')
+PREHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@merge1_mm
-POSTHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true')
+POSTHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@merge1_mm
@@ -1223,7 +1223,7 @@ POSTHOOK: query: -- TODO: need to include merge+union+DP, but it's broken for no
drop table ctas0_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate
+PREHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@intermediate
PREHOOK: Input: default@intermediate@p=455
@@ -1231,7 +1231,7 @@ PREHOOK: Input: default@intermediate@p=456
PREHOOK: Input: default@intermediate@p=457
PREHOOK: Output: database:default
PREHOOK: Output: default@ctas0_mm
-POSTHOOK: query: create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate
+POSTHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@intermediate
POSTHOOK: Input: default@intermediate@p=455
@@ -1267,7 +1267,7 @@ PREHOOK: query: drop table ctas1_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table ctas1_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as
+PREHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
select * from intermediate union all select * from intermediate
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@intermediate
@@ -1276,7 +1276,7 @@ PREHOOK: Input: default@intermediate@p=456
PREHOOK: Input: default@intermediate@p=457
PREHOOK: Output: database:default
PREHOOK: Output: default@ctas1_mm
-POSTHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as
+POSTHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
select * from intermediate union all select * from intermediate
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@intermediate
@@ -1319,11 +1319,11 @@ PREHOOK: query: drop table iow0_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table iow0_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table iow0_mm(key int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: create table iow0_mm(key int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@iow0_mm
@@ -1418,11 +1418,11 @@ PREHOOK: query: drop table iow1_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table iow1_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table iow1_mm(key int) partitioned by (key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table iow1_mm(key int) partitioned by (key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@iow1_mm
@@ -1690,11 +1690,11 @@ PREHOOK: query: drop table load0_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table load0_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true')
+PREHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@load0_mm
-POSTHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true')
+POSTHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@load0_mm
@@ -1801,11 +1801,11 @@ PREHOOK: query: drop table load1_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table load1_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true')
+PREHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@load1_mm
-POSTHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true')
+POSTHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@load1_mm
@@ -1913,12 +1913,12 @@ PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table load2_mm
POSTHOOK: type: DROPTABLE
PREHOOK: query: create table load2_mm (key string, value string)
- partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true')
+ partitioned by (k int, l int) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@load2_mm
POSTHOOK: query: create table load2_mm (key string, value string)
- partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true')
+ partitioned by (k int, l int) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@load2_mm
@@ -2018,11 +2018,11 @@ POSTHOOK: Input: default@intermediate@p=457
POSTHOOK: Output: default@intermediate_nonpart
POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@intermmediate_nonpart
@@ -2042,11 +2042,11 @@ POSTHOOK: Input: default@intermediate@p=457
POSTHOOK: Output: default@intermmediate_nonpart
POSTHOOK: Lineage: intermmediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
POSTHOOK: Lineage: intermmediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@intermmediate
-POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@intermmediate
@@ -2137,11 +2137,11 @@ POSTHOOK: query: -- non-MM export to MM table, with and without partitions
drop table import0_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import0_mm(key int, p int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@import0_mm
-POSTHOOK: query: create table import0_mm(key int, p int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@import0_mm
@@ -2180,12 +2180,12 @@ PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table import1_mm
POSTHOOK: type: DROPTABLE
PREHOOK: query: create table import1_mm(key int) partitioned by (p int)
- stored as orc tblproperties('hivecommit'='true')
+ stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@import1_mm
POSTHOOK: query: create table import1_mm(key int) partitioned by (p int)
- stored as orc tblproperties('hivecommit'='true')
+ stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@import1_mm
@@ -2340,11 +2340,11 @@ POSTHOOK: query: -- MM export into existing MM table, non-part and partial part
drop table import4_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import4_mm(key int, p int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@import4_mm
-POSTHOOK: query: create table import4_mm(key int, p int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@import4_mm
@@ -2382,11 +2382,11 @@ PREHOOK: query: drop table import5_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table import5_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@import5_mm
-POSTHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@import5_mm
@@ -2524,19 +2524,19 @@ PREHOOK: query: drop table multi0_2_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table multi0_2_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@multi0_1_mm
-POSTHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@multi0_1_mm
-PREHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@multi0_2_mm
@@ -2670,11 +2670,11 @@ PREHOOK: query: drop table multi1_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table multi1_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true')
+PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@multi1_mm
-POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true')
+POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@multi1_mm
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/test/results/clientpositive/llap/mm_all2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_all2.q.out b/ql/src/test/results/clientpositive/llap/mm_all2.q.out
index 3921c7d..eaaebe6 100644
--- a/ql/src/test/results/clientpositive/llap/mm_all2.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all2.q.out
@@ -49,13 +49,13 @@ POSTHOOK: query: drop table bucket0_mm
POSTHOOK: type: DROPTABLE
PREHOOK: query: create table bucket0_mm(key int, id int)
clustered by (key) into 2 buckets
-tblproperties('hivecommit'='true')
+tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket0_mm
POSTHOOK: query: create table bucket0_mm(key int, id int)
clustered by (key) into 2 buckets
-tblproperties('hivecommit'='true')
+tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket0_mm
@@ -189,13 +189,13 @@ POSTHOOK: query: drop table bucket1_mm
POSTHOOK: type: DROPTABLE
PREHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int)
clustered by (key) sorted by (key) into 2 buckets
-tblproperties('hivecommit'='true')
+tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket1_mm
POSTHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int)
clustered by (key) sorted by (key) into 2 buckets
-tblproperties('hivecommit'='true')
+tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket1_mm
@@ -379,13 +379,13 @@ POSTHOOK: query: drop table bucket2_mm
POSTHOOK: type: DROPTABLE
PREHOOK: query: create table bucket2_mm(key int, id int)
clustered by (key) into 10 buckets
-tblproperties('hivecommit'='true')
+tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket2_mm
POSTHOOK: query: create table bucket2_mm(key int, id int)
clustered by (key) into 10 buckets
-tblproperties('hivecommit'='true')
+tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket2_mm
http://git-wip-us.apache.org/repos/asf/hive/blob/c587404d/ql/src/test/results/clientpositive/llap/mm_current.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out
index 0522288..a00f935 100644
--- a/ql/src/test/results/clientpositive/llap/mm_current.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out
@@ -36,19 +36,19 @@ PREHOOK: query: drop table multi0_2_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table multi0_2_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='false')
+PREHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@multi0_1_mm
-POSTHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='false')
+POSTHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@multi0_1_mm
-PREHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='false')
+PREHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='false')
+POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@multi0_2_mm
@@ -168,11 +168,11 @@ PREHOOK: query: drop table multi1_mm
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table multi1_mm
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='false')
+PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@multi1_mm
-POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='false')
+POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@multi1_mm