Posted to commits@hive.apache.org by jc...@apache.org on 2018/01/11 21:55:01 UTC

[21/22] hive git commit: HIVE-14498: Freshness period for query rewriting using materialized views (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
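
The change adds a configurable freshness window: a materialized view is only
considered for automatic query rewriting if its source tables are unchanged, or
changed within a tolerated time window. As a minimal sketch (assuming the window
property holds an integer number of seconds, consistent with the * 1000
conversion in the Hive.java hunk below), the window would be configured as:

  HiveConf conf = new HiveConf();
  // 0 means no staleness is tolerated; here we accept contents up to 5 minutes old.
  conf.setIntVar(HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW, 300);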

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql
index 12c24a5..5411bc4 100644
--- a/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql
@@ -43,9 +43,14 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TXNID NUMBER(19),
   CTC_DATABASE varchar(128) NOT NULL,
   CTC_TABLE varchar(128),
-  CTC_PARTITION varchar(767)
+  CTC_PARTITION varchar(767),
+  CTC_ID NUMBER(19) GENERATED ALWAYS AS IDENTITY (START WITH 1, INCREMENT BY 1) NOT NULL,
+  CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL
 ) ROWDEPENDENCIES;
 
+CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_ID);
+CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX2 ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+
 CREATE TABLE NEXT_TXN_ID (
   NTXN_NEXT NUMBER(19) NOT NULL
 );
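
The new CTC_ID (identity) and CTC_TIMESTAMP (DEFAULT CURRENT_TIMESTAMP) columns
are populated by the database itself, so writers keep supplying only the original
four columns. A minimal JDBC sketch of that behaviour (hypothetical connection
details and values; any supported metastore database with the schema above
behaves the same way):

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.sql.PreparedStatement;
  import java.sql.ResultSet;

  public class CtcInsertSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = DriverManager.getConnection(
          "jdbc:oracle:thin:@//localhost:1521/metastore", "hive", "hive")) {
        // CTC_ID and CTC_TIMESTAMP are generated by the database, not supplied here.
        try (PreparedStatement ps = conn.prepareStatement(
            "INSERT INTO COMPLETED_TXN_COMPONENTS "
                + "(CTC_TXNID, CTC_DATABASE, CTC_TABLE, CTC_PARTITION) VALUES (?, ?, ?, ?)")) {
          ps.setLong(1, 42L);
          ps.setString(2, "default");
          ps.setString(3, "src");
          ps.setString(4, "ds=2018-01-11");
          ps.executeUpdate();
        }
        // Reading the row back shows the generated id and timestamp.
        try (PreparedStatement ps = conn.prepareStatement(
            "SELECT CTC_ID, CTC_TIMESTAMP FROM COMPLETED_TXN_COMPONENTS WHERE CTC_TXNID = ?")) {
          ps.setLong(1, 42L);
          try (ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
              System.out.println(rs.getLong(1) + " " + rs.getTimestamp(2));
            }
          }
        }
      }
    }
  }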

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index 8a8f294..90e02f7 100644
--- a/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -7,6 +7,7 @@ SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
 @045-HIVE-16886.oracle.sql;
 @046-HIVE-17566.oracle.sql;
 @047-HIVE-18202-oracle.sql;
+@048-HIVE-14498.oracle.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/metastore/scripts/upgrade/postgres/047-HIVE-14498.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/047-HIVE-14498.postgres.sql b/metastore/scripts/upgrade/postgres/047-HIVE-14498.postgres.sql
new file mode 100644
index 0000000..8d4de88
--- /dev/null
+++ b/metastore/scripts/upgrade/postgres/047-HIVE-14498.postgres.sql
@@ -0,0 +1,23 @@
+CREATE TABLE "MV_CREATION_METADATA" (
+    "TBL_ID" BIGINT NOT NULL,
+    "TBL_NAME" character varying(256) NOT NULL,
+    "LAST_TRANSACTION_INFO" TEXT NOT NULL
+);
+ALTER TABLE ONLY "MV_CREATION_METADATA"
+    ADD CONSTRAINT "MV_CREATION_METADATA_FK" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS_NEW (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(256),
+  CTC_PARTITION varchar(767),
+  CTC_ID serial UNIQUE,
+  CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL
+);
+CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS_NEW USING btree (CTC_ID);
+CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX2 ON COMPLETED_TXN_COMPONENTS_NEW USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+INSERT INTO COMPLETED_TXN_COMPONENTS_NEW (CTC_TXNID, CTC_DATABASE, CTC_TABLE, CTC_PARTITION)
+SELECT CTC_TXNID, CTC_DATABASE, CTC_TABLE, CTC_PARTITION FROM COMPLETED_TXN_COMPONENTS;
+ALTER TABLE COMPLETED_TXN_COMPONENTS RENAME TO COMPLETED_TXN_COMPONENTS_BACKUP;
+ALTER TABLE COMPLETED_TXN_COMPONENTS_NEW RENAME TO COMPLETED_TXN_COMPONENTS;
+DROP TABLE COMPLETED_TXN_COMPONENTS_BACKUP;

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
index abe7243..9e2dbc2 100644
--- a/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
@@ -659,6 +659,15 @@ CREATE TABLE "WM_MAPPING" (
     "ORDERING" integer
 );
 
+CREATE TABLE "MV_CREATION_METADATA" (
+    "TBL_ID" BIGINT NOT NULL,
+    "TBL_NAME" character varying(256) NOT NULL,
+    "LAST_TRANSACTION_INFO" TEXT NOT NULL
+);
+
+ALTER TABLE ONLY "MV_CREATION_METADATA"
+    ADD CONSTRAINT "MV_CREATION_METADATA_FK" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
 --
 -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
 --

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql
index 1fa99af..a81d6ee 100644
--- a/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql
@@ -43,9 +43,14 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TXNID bigint,
   CTC_DATABASE varchar(128) NOT NULL,
   CTC_TABLE varchar(256),
-  CTC_PARTITION varchar(767)
+  CTC_PARTITION varchar(767),
+  CTC_ID serial UNIQUE,
+  CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL
 );
 
+CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_ID);
+CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX2 ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+
 CREATE TABLE NEXT_TXN_ID (
   NTXN_NEXT bigint NOT NULL
 );

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
index 91de88f..4737208 100644
--- a/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
@@ -7,6 +7,7 @@ SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0';
 \i 044-HIVE-16886.postgres.sql;
 \i 045-HIVE-17566.postgres.sql;
 \i 046-HIVE-18202.postgres.sql;
+\i 047-HIVE-14498.postgres.sql;
 
 UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0';

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/Context.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 6d48783..89b0bda 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -40,8 +40,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.BlobStorageUtils;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.TaskRunner;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -141,6 +141,9 @@ public class Context {
   // Identify whether the query involves an UPDATE, DELETE or MERGE
   private boolean isUpdateDeleteMerge;
 
+  // Whether the analyzer has been instantiated to read and load materialized view plans
+  private boolean isLoadingMaterializedView;
+
   /**
    * This determines the prefix of the
    * {@link org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.Phase1Ctx#dest}
@@ -1027,6 +1030,15 @@ public class Context {
   public void setIsUpdateDeleteMerge(boolean isUpdate) {
     this.isUpdateDeleteMerge = isUpdate;
   }
+
+  public boolean isLoadingMaterializedView() {
+    return isLoadingMaterializedView;
+  }
+
+  public void setIsLoadingMaterializedView(boolean isLoadingMaterializedView) {
+    this.isLoadingMaterializedView = isLoadingMaterializedView;
+  }
+
   public String getExecutionId() {
     return executionId;
   }
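
The new flag exists so that, when the registry parses a materialized view
definition to cache its logical plan, that parse is not itself subject to
materialized-view rewriting. A condensed sketch of the intended usage, pieced
together from the HiveMaterializedViewsRegistry and CalcitePlanner hunks later
in this patch (not runnable outside a Hive session):

  Context ctx = new Context(SessionState.get().getConf());
  // Mark this context as a view load, not a user query.
  ctx.setIsLoadingMaterializedView(true);
  CalcitePlanner analyzer = new CalcitePlanner(
      new QueryState.Builder().withHiveConf(SessionState.get().getConf()).build());
  analyzer.initCtx(ctx);
  analyzer.init(false);
  // CalcitePlanner then skips rewriting for such contexts:
  //   ... && !getQB().isMaterializedView() && !ctx.isLoadingMaterializedView() && !getQB().isCTAS()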

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index cf8386b..81e1dd9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -21,13 +21,6 @@ package org.apache.hadoop.hive.ql.exec;
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
 
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-
-import java.util.concurrent.ExecutionException;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ListenableFuture;
 import java.io.BufferedWriter;
 import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
@@ -55,6 +48,9 @@ import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -64,6 +60,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.Constants;
@@ -78,6 +75,7 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CompactionResponse;
@@ -109,6 +107,7 @@ import org.apache.hadoop.hive.metastore.api.TxnInfo;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
@@ -183,6 +182,8 @@ import org.apache.hadoop.hive.ql.plan.ColStatistics;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
 import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
@@ -240,8 +241,6 @@ import org.apache.hadoop.hive.ql.plan.TezWork;
 import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
-import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
-import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils;
 import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationTranslator;
@@ -285,6 +284,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.stringtemplate.v4.ST;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ListenableFuture;
+
 /**
  * DDLTask implementation.
  *
@@ -1403,11 +1407,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       throw new AssertionError("Unsupported alter materialized view type! : " + alterMVDesc.getOp());
     }
 
-    try {
-      db.alterTable(mv, environmentContext);
-    } catch (InvalidOperationException e) {
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "Unable to alter " + mv.getFullyQualifiedName());
-    }
+    db.alterTable(mv, environmentContext);
+
     return 0;
   }
 
@@ -1557,11 +1558,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
 
     tbl.getTTable().setPartitionKeys(newPartitionKeys);
 
-    try {
-      db.alterTable(tbl, null);
-    } catch (InvalidOperationException e) {
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "Unable to alter " + tbl.getFullyQualifiedName());
-    }
+    db.alterTable(tbl, null);
 
     work.getInputs().add(new ReadEntity(tbl));
     // We've already locked the table as the input, don't relock it as the output.
@@ -1587,11 +1584,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
 
     if (touchDesc.getPartSpec() == null) {
-      try {
-        db.alterTable(tbl, environmentContext);
-      } catch (InvalidOperationException e) {
-        throw new HiveException("Uable to update table");
-      }
+      db.alterTable(tbl, environmentContext);
       work.getInputs().add(new ReadEntity(tbl));
       addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     } else {
@@ -4918,11 +4911,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     // create the table
     if (crtTbl.getReplaceMode()) {
       // replace-mode creates are really alters using CreateTableDesc.
-      try {
-        db.alterTable(tbl, null);
-      } catch (InvalidOperationException e) {
-        throw new HiveException("Unable to alter table. " + e.getMessage(), e);
-      }
+      db.alterTable(tbl, null);
     } else {
       if ((foreignKeys != null && foreignKeys.size() > 0 ) ||
           (primaryKeys != null && primaryKeys.size() > 0) ||
@@ -5129,6 +5118,12 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       }
 
       if (crtView.isMaterialized()) {
+        // We need to update the status of the creation signature
+        String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
+        oldview.getTTable().setCreationMetadata(
+            generateCreationMetadata(db, crtView.getTablesUsed(),
+                txnString == null ? null : new ValidReadTxnList(txnString)));
+        db.alterTable(crtView.getViewName(), oldview, null);
         // This is a replace/rebuild, so we need an exclusive lock
         addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_EXCLUSIVE));
       } else {
@@ -5151,16 +5146,19 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
           oldview.setOutputFormatClass(crtView.getOutputFormat());
         }
         oldview.checkValidity(null);
-        try {
-          db.alterTable(crtView.getViewName(), oldview, null);
-        } catch (InvalidOperationException e) {
-          throw new HiveException(e);
-        }
+        db.alterTable(crtView.getViewName(), oldview, null);
         addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
       }
     } else {
       // We create new view
       Table tbl = crtView.toTable(conf);
+      // We set the signature for the view if it is a materialized view
+      if (tbl.isMaterializedView()) {
+        String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
+        tbl.getTTable().setCreationMetadata(
+            generateCreationMetadata(db, crtView.getTablesUsed(),
+                txnString == null ? null : new ValidReadTxnList(txnString)));
+      }
       db.createTable(tbl, crtView.getIfNotExists());
       addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
 
@@ -5171,6 +5169,38 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
+  private Map<String, BasicTxnInfo> generateCreationMetadata(
+      Hive db, List<String> tablesUsed, ValidReadTxnList txnList)
+          throws SemanticException {
+    Map<String, BasicTxnInfo> signature = new HashMap<>();
+    try {
+      if (!CollectionUtils.isEmpty(tablesUsed)) {
+        if (txnList == null) {
+          for (String fullyQualifiedName : tablesUsed) {
+            signature.put(fullyQualifiedName, new BasicTxnInfo(true));
+          }
+        } else {
+          List<String> dbNames = new ArrayList<>();
+          List<String> tableNames = new ArrayList<>();
+          for (String fullyQualifiedName : tablesUsed) {
+            // Add to creation metadata
+            String[] names =  fullyQualifiedName.split("\\.");
+            dbNames.add(names[0]);
+            tableNames.add(names[1]);
+          }
+          List<BasicTxnInfo> txnInfos =
+              db.getMSC().getLastCompletedTransactionForTables(dbNames, tableNames, txnList);
+          for (int i = 0; i < tablesUsed.size(); i++) {
+            signature.put(tablesUsed.get(i), txnInfos.get(i));
+          }
+        }
+      }
+    } catch (Exception ex) {
+      throw new SemanticException(ex);
+    }
+    return signature;
+  }
+
   private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException {
 
     if (truncateTableDesc.getColumnIndexes() != null) {
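
generateCreationMetadata() records, for each source table of the view, the last
completed transaction at creation time, keyed by fully qualified table name. A
self-contained sketch of the splitting and keying logic (a String stands in for
BasicTxnInfo, and the table names are hypothetical):

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.LinkedHashMap;
  import java.util.List;
  import java.util.Map;

  public class CreationMetadataSketch {
    public static void main(String[] args) {
      List<String> tablesUsed = Arrays.asList("default.emps", "sales.orders");
      List<String> dbNames = new ArrayList<>();
      List<String> tableNames = new ArrayList<>();
      for (String fullyQualifiedName : tablesUsed) {
        // Same split as in the method above: "db.table" -> ["db", "table"]
        String[] names = fullyQualifiedName.split("\\.");
        dbNames.add(names[0]);
        tableNames.add(names[1]);
      }
      // getLastCompletedTransactionForTables(dbNames, tableNames, txnList) returns
      // one entry per table, in order; the signature map is keyed by qualified name.
      Map<String, String> signature = new LinkedHashMap<>();
      for (int i = 0; i < tablesUsed.size(); i++) {
        signature.put(tablesUsed.get(i), "txn info for " + tableNames.get(i));
      }
      System.out.println(signature); // {default.emps=..., sales.orders=...}
    }
  }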

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
index a57e4c8..a917c07 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
@@ -26,8 +26,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.exec.DDLTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskRunner;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
@@ -69,21 +67,24 @@ public class MaterializedViewRegistryUpdateHook implements QueryLifeTimeHook {
           DDLTask ddlTask = (DDLTask) task;
           DDLWork work = ddlTask.getWork();
           String tableName = null;
+          boolean isRewriteEnabled = false;
           if (work.getCreateViewDesc() != null && work.getCreateViewDesc().isMaterialized()) {
             tableName = work.getCreateViewDesc().toTable(hiveConf).getFullyQualifiedName();
-          }
-          if (work.getAlterMaterializedViewDesc() != null) {
+            isRewriteEnabled = work.getCreateViewDesc().isRewriteEnabled();
+          } else if (work.getAlterMaterializedViewDesc() != null) {
             tableName = work.getAlterMaterializedViewDesc().getMaterializedViewName();
-          }
-          if (tableName == null) {
+            isRewriteEnabled = work.getAlterMaterializedViewDesc().isRewriteEnable();
+          } else {
             continue;
           }
-          Table mvTable = Hive.get().getTable(tableName);
 
-          if (mvTable.isRewriteEnabled()) {
-            HiveMaterializedViewsRegistry.get().addMaterializedView(mvTable);
-          } else {
-            HiveMaterializedViewsRegistry.get().dropMaterializedView(mvTable);
+          if (isRewriteEnabled) {
+            Table mvTable = Hive.get().getTable(tableName);
+            HiveMaterializedViewsRegistry.get().createMaterializedView(mvTable);
+          } else if (work.getAlterMaterializedViewDesc() != null) {
+            // Disabling rewriting, removing from cache
+            String[] names =  tableName.split("\\.");
+            HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]);
           }
         }
       }
@@ -92,7 +93,7 @@ public class MaterializedViewRegistryUpdateHook implements QueryLifeTimeHook {
         String message = "Error updating materialized view cache; consider disabling: " + ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING.varname;
         LOG.error(message, e);
         throw new RuntimeException(message, e);
-      }else {
+      } else {
         LOG.debug("Exception during materialized view cache update", e);
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 41614d4..1a2b3c1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -27,14 +27,11 @@ import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM;
 import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
 import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
 
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -111,6 +108,7 @@ import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.Materialization;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -120,8 +118,6 @@ import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -134,8 +130,11 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMMapping;
 import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -149,6 +148,7 @@ import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.DropTableDesc;
@@ -612,7 +612,7 @@ public class Hive {
   }
 
   public void alterTable(Table newTbl, EnvironmentContext environmentContext)
-      throws InvalidOperationException, HiveException {
+      throws HiveException {
     alterTable(newTbl.getDbName(), newTbl.getTableName(), newTbl, false, environmentContext);
   }
 
@@ -628,19 +628,19 @@ public class Hive {
    * @throws TException
    */
   public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext)
-      throws InvalidOperationException, HiveException {
+      throws HiveException {
     alterTable(fullyQlfdTblName, newTbl, false, environmentContext);
   }
 
   public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext)
-      throws InvalidOperationException, HiveException {
+      throws HiveException {
     String[] names = Utilities.getDbTableName(fullyQlfdTblName);
     alterTable(names[0], names[1], newTbl, cascade, environmentContext);
   }
 
   public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
       EnvironmentContext environmentContext)
-      throws InvalidOperationException, HiveException {
+      throws HiveException {
 
     try {
       // Remove the DDL_TIME so it gets refreshed
@@ -1448,6 +1448,21 @@ public class Hive {
     }
   }
 
+  private List<Table> getTableObjects(String dbName, List<String> tableNames) throws HiveException {
+    try {
+      return Lists.transform(getMSC().getTableObjectsByName(dbName, tableNames),
+        new com.google.common.base.Function<org.apache.hadoop.hive.metastore.api.Table, Table>() {
+          @Override
+          public Table apply(org.apache.hadoop.hive.metastore.api.Table table) {
+            return new Table(table);
+          }
+        }
+      );
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
   /**
    * Returns all existing tables from default database which match the given
    * pattern. The matching occurs as per Java regular expressions
@@ -1527,42 +1542,88 @@ public class Hive {
    * Get the materialized views that have been enabled for rewriting from the
    * metastore. If the materialized view is in the cache, we do not need to
    * parse it to generate a logical plan for the rewriting. Instead, we
-   * return the version present in the cache.
+   * return the version present in the cache. Further, information provided
+   * by the invalidation cache is useful to know whether a materialized view
+   * can be used for rewriting or not.
    *
    * @return the list of materialized views available for rewriting
    * @throws HiveException
    */
-  public List<RelOptMaterialization> getRewritingMaterializedViews() throws HiveException {
+  public List<RelOptMaterialization> getValidMaterializedViews() throws HiveException {
+    final long diff = conf.getIntVar(HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW) * 1000L;
+    final long minTime = System.currentTimeMillis() - diff;
     try {
       // Final result
       List<RelOptMaterialization> result = new ArrayList<>();
       for (String dbName : getMSC().getAllDatabases()) {
         // From metastore (for security)
-        List<String> tables = getAllMaterializedViews(dbName);
-        // Cached views (includes all)
-        Collection<RelOptMaterialization> cachedViews =
-            HiveMaterializedViewsRegistry.get().getRewritingMaterializedViews(dbName);
-        if (cachedViews.isEmpty()) {
+        List<String> materializedViewNames = getMaterializedViewsForRewriting(dbName);
+        if (materializedViewNames.isEmpty()) {
           // Bail out: empty list
           continue;
         }
-        Map<String, RelOptMaterialization> qualifiedNameToView =
-            new HashMap<String, RelOptMaterialization>();
-        for (RelOptMaterialization materialization : cachedViews) {
-          qualifiedNameToView.put(materialization.qualifiedTableName.get(0), materialization);
-        }
-        for (String table : tables) {
-          // Compose qualified name
-          String fullyQualifiedName = dbName;
-          if (fullyQualifiedName != null && !fullyQualifiedName.isEmpty()) {
-            fullyQualifiedName = fullyQualifiedName + "." + table;
+        List<Table> materializedViewTables = getTableObjects(dbName, materializedViewNames);
+        Map<String, Materialization> databaseInvalidationInfo =
+            getMSC().getMaterializationsInvalidationInfo(dbName, materializedViewNames);
+        for (Table materializedViewTable : materializedViewTables) {
+          // Check whether the materialized view is invalidated
+          Materialization materializationInvalidationInfo =
+              databaseInvalidationInfo.get(materializedViewTable.getTableName());
+          if (materializationInvalidationInfo == null) {
+            LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                " ignored for rewriting as there was no information loaded in the invalidation cache");
+            continue;
+          }
+          long invalidationTime = materializationInvalidationInfo.getInvalidationTime();
+          // If the limit is not met, we do not add the materialized view
+          if (diff == 0L) {
+            if (invalidationTime != 0L) {
+              // If parameter is zero, materialized view cannot be outdated at all
+              LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                  " ignored for rewriting as its contents are outdated");
+              continue;
+            }
           } else {
-            fullyQualifiedName = table;
+            if (invalidationTime != 0 && minTime > invalidationTime) {
+              LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                  " ignored for rewriting as its contents are outdated");
+              continue;
+            }
           }
-          RelOptMaterialization materialization = qualifiedNameToView.get(fullyQualifiedName);
+
+          // It passed the test, load
+          RelOptMaterialization materialization =
+              HiveMaterializedViewsRegistry.get().getRewritingMaterializedView(
+                  dbName, materializedViewTable.getTableName());
           if (materialization != null) {
-            // Add to final result set
-            result.add(materialization);
+            RelOptHiveTable cachedMaterializedViewTable =
+                (RelOptHiveTable) materialization.tableRel.getTable();
+            if (cachedMaterializedViewTable.getHiveTableMD().getCreateTime() ==
+                materializedViewTable.getCreateTime()) {
+              // It is in the cache and up to date
+              result.add(materialization);
+              continue;
+            }
+          }
+
+          // It was not present in the cache (maybe because it was added by another HS2)
+          // or it is not up to date.
+          if (HiveMaterializedViewsRegistry.get().isInitialized()) {
+            // But the registry was fully initialized, thus we need to add it
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                  " was not in the cache");
+            }
+            materialization = HiveMaterializedViewsRegistry.get().createMaterializedView(materializedViewTable);
+            if (materialization != null) {
+              result.add(materialization);
+            }
+          } else {
+            // Otherwise the registry has not been initialized, skip for the time being
+            if (LOG.isWarnEnabled()) {
+              LOG.info("Materialized view " + materializedViewTable.getFullyQualifiedName() + " was skipped "
+                  + "because cache has not been loaded yet");
+            }
           }
         }
       }
@@ -1573,6 +1634,20 @@ public class Hive {
   }
 
   /**
+   * Get the names of the materialized views with rewriting enabled in the given database.
+   * @param dbName the database to inspect
+   * @return the list of materialized view names enabled for rewriting
+   * @throws HiveException
+   */
+  private List<String> getMaterializedViewsForRewriting(String dbName) throws HiveException {
+    try {
+      return getMSC().getMaterializedViewsForRewriting(dbName);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
+  /**
    * Get all existing database names.
    *
    * @return List of database names.
@@ -2386,14 +2461,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
       environmentContext = new EnvironmentContext();
       environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
     }
-    try {
-      alterTable(tbl, environmentContext);
-    } catch (InvalidOperationException e) {
-      throw new HiveException(e);
-    }
+
+    alterTable(tbl, environmentContext);
 
     fireInsertEvent(tbl, null, (loadFileType == LoadFileType.REPLACE_ALL), newFiles);
   }
+
   /**
    * Creates a partition.
    *
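
The staleness test in getValidMaterializedViews() above boils down to a small
predicate. A self-contained sketch of it (the method name and the seconds unit
for the window are assumptions, consistent with the * 1000 conversion above):

  public class FreshnessWindowSketch {
    // invalidationTime == 0 means the view was never invalidated.
    static boolean usableForRewriting(long windowSeconds, long invalidationTime, long now) {
      long diff = windowSeconds * 1000L;
      long minTime = now - diff;
      if (invalidationTime == 0L) {
        return true;                        // contents are up to date
      }
      if (diff == 0L) {
        return false;                       // zero window: no staleness tolerated
      }
      return minTime <= invalidationTime;   // outdated, but within the window
    }

    public static void main(String[] args) {
      long now = System.currentTimeMillis();
      System.out.println(usableForRewriting(300, 0L, now));             // true
      System.out.println(usableForRewriting(300, now - 60_000L, now));  // true: 1 min old
      System.out.println(usableForRewriting(300, now - 600_000L, now)); // false: 10 min old
      System.out.println(usableForRewriting(0, now - 1L, now));         // false
    }
  }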

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index 2b1023a..7e5c81b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -19,12 +19,9 @@ package org.apache.hadoop.hive.ql.metadata;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -40,16 +37,12 @@ import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptMaterialization;
 import org.apache.calcite.plan.RelOptPlanner;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.core.TableScan;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rel.type.RelDataTypeImpl;
 import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -58,10 +51,8 @@ import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
@@ -87,7 +78,7 @@ import com.google.common.collect.ImmutableList;
  * Registry for materialized views. The goal of this cache is to avoid parsing and creating
  * logical plans for the materialized views at query runtime. When a query arrives, we will
  * just need to consult this cache and extract the logical plans for the views (which had
- * already been parsed) from it.
+ * already been parsed) from it. This cache lives in HS2.
  */
 public final class HiveMaterializedViewsRegistry {
 
@@ -96,13 +87,12 @@ public final class HiveMaterializedViewsRegistry {
   /* Singleton */
   private static final HiveMaterializedViewsRegistry SINGLETON = new HiveMaterializedViewsRegistry();
 
-  /* Key is the database name. Value a map from a unique identifier for the view comprising
-   * the qualified name and the creation time, to the view object.
-   * Since currently we cannot alter a materialized view, that should suffice to identify
-   * whether the cached view is up to date or not.
-   * Creation time is useful to ensure correctness in case multiple HS2 instances are used. */
-  private final ConcurrentMap<String, ConcurrentMap<ViewKey, RelOptMaterialization>> materializedViews =
-      new ConcurrentHashMap<String, ConcurrentMap<ViewKey, RelOptMaterialization>>();
+  /* Key is the database name. Value a map from the qualified name to the view object. */
+  private final ConcurrentMap<String, ConcurrentMap<String, RelOptMaterialization>> materializedViews =
+      new ConcurrentHashMap<String, ConcurrentMap<String, RelOptMaterialization>>();
+
+  /* Whether the cache has been initialized or not. */
+  private boolean initialized;
 
   private HiveMaterializedViewsRegistry() {
   }
@@ -141,64 +131,82 @@ public final class HiveMaterializedViewsRegistry {
     @Override
     public void run() {
       try {
-        List<Table> materializedViews = new ArrayList<Table>();
         for (String dbName : db.getAllDatabases()) {
-          materializedViews.addAll(db.getAllMaterializedViewObjects(dbName));
-        }
-        for (Table mv : materializedViews) {
-          addMaterializedView(mv);
+          for (Table mv : db.getAllMaterializedViewObjects(dbName)) {
+            addMaterializedView(mv, OpType.LOAD);
+          }
         }
+        initialized = true;
       } catch (HiveException e) {
         LOG.error("Problem connecting to the metastore when initializing the view registry");
       }
     }
   }
 
+  public boolean isInitialized() {
+    return initialized;
+  }
+
+  /**
+   * Adds a newly created materialized view to the cache.
+   *
+   * @param materializedViewTable the materialized view
+   * @return the materialization, or null if it could not be created
+   */
+  public RelOptMaterialization createMaterializedView(Table materializedViewTable) {
+    return addMaterializedView(materializedViewTable, OpType.CREATE);
+  }
+
   /**
    * Adds the materialized view to the cache.
    *
    * @param materializedViewTable the materialized view
    */
-  public void addMaterializedView(Table materializedViewTable) {
+  private RelOptMaterialization addMaterializedView(Table materializedViewTable, OpType opType) {
     // Bail out if it is not enabled for rewriting
     if (!materializedViewTable.isRewriteEnabled()) {
-      return;
+      return null;
     }
-    materializedViewTable.getFullyQualifiedName();
 
-    ConcurrentMap<ViewKey, RelOptMaterialization> cq =
-        new ConcurrentHashMap<ViewKey, RelOptMaterialization>();
-    final ConcurrentMap<ViewKey, RelOptMaterialization> prevCq = materializedViews.putIfAbsent(
+    // We are going to create the map for each view in the given database
+    ConcurrentMap<String, RelOptMaterialization> cq =
+        new ConcurrentHashMap<String, RelOptMaterialization>();
+    final ConcurrentMap<String, RelOptMaterialization> prevCq = materializedViews.putIfAbsent(
         materializedViewTable.getDbName(), cq);
     if (prevCq != null) {
       cq = prevCq;
     }
-    // Bail out if it already exists
-    final ViewKey vk = ViewKey.forTable(materializedViewTable);
-    if (cq.containsKey(vk)) {
-      return;
-    }
-    // Add to cache
+
+    // Start the process to add MV to the cache
+    // First we parse the view query and create the materialization object
     final String viewQuery = materializedViewTable.getViewExpandedText();
     final RelNode viewScan = createMaterializedViewScan(materializedViewTable);
     if (viewScan == null) {
       LOG.warn("Materialized view " + materializedViewTable.getCompleteName() +
               " ignored; error creating view replacement");
-      return;
+      return null;
     }
     final RelNode queryRel = parseQuery(viewQuery);
     if (queryRel == null) {
       LOG.warn("Materialized view " + materializedViewTable.getCompleteName() +
               " ignored; error parsing original query");
-      return;
+      return null;
     }
+
     RelOptMaterialization materialization = new RelOptMaterialization(viewScan, queryRel,
         null, viewScan.getTable().getQualifiedName());
-    cq.put(vk, materialization);
+    if (opType == OpType.CREATE) {
+      // For CREATE, we always store the materialized view
+      cq.put(materializedViewTable.getTableName(), materialization);
+    } else {
+      // For LOAD, we only add it if it does not exist, as we might be loading an outdated MV
+      cq.putIfAbsent(materializedViewTable.getTableName(), materialization);
+    }
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Cached materialized view for rewriting: " + viewScan.getTable().getQualifiedName());
     }
-    return;
+    return materialization;
   }
 
   /**
@@ -207,10 +215,19 @@ public final class HiveMaterializedViewsRegistry {
    * @param materializedViewTable the materialized view to remove
    */
   public void dropMaterializedView(Table materializedViewTable) {
-    final ViewKey vk = ViewKey.forTable(materializedViewTable);
-    ConcurrentMap<ViewKey, RelOptMaterialization> dbMap = materializedViews.get(materializedViewTable.getDbName());
+    dropMaterializedView(materializedViewTable.getDbName(), materializedViewTable.getTableName());
+  }
+
+  /**
+   * Removes the materialized view from the cache.
+   *
+   * @param dbName the db for the materialized view to remove
+   * @param tableName the name for the materialized view to remove
+   */
+  public void dropMaterializedView(String dbName, String tableName) {
+    ConcurrentMap<String, RelOptMaterialization> dbMap = materializedViews.get(dbName);
     if (dbMap != null) {
-      dbMap.remove(vk);
+      dbMap.remove(tableName);
     }
   }
 
@@ -220,11 +237,11 @@ public final class HiveMaterializedViewsRegistry {
    * @param dbName the database
    * @return the collection of materialized views, or the empty collection if none
    */
-  Collection<RelOptMaterialization> getRewritingMaterializedViews(String dbName) {
+  RelOptMaterialization getRewritingMaterializedView(String dbName, String viewName) {
     if (materializedViews.get(dbName) != null) {
-      return Collections.unmodifiableCollection(materializedViews.get(dbName).values());
+      return materializedViews.get(dbName).get(viewName);
     }
-    return ImmutableList.of();
+    return null;
   }
 
   private static RelNode createMaterializedViewScan(Table viewTable) {
@@ -343,7 +360,9 @@ public final class HiveMaterializedViewsRegistry {
       final QueryState qs =
           new QueryState.Builder().withHiveConf(SessionState.get().getConf()).build();
       CalcitePlanner analyzer = new CalcitePlanner(qs);
-      analyzer.initCtx(new Context(SessionState.get().getConf()));
+      Context ctx = new Context(SessionState.get().getConf());
+      ctx.setIsLoadingMaterializedView(true);
+      analyzer.initCtx(ctx);
       analyzer.init(false);
       return analyzer.genLogicalPlan(node);
     } catch (Exception e) {
@@ -353,45 +372,6 @@ public final class HiveMaterializedViewsRegistry {
     }
   }
 
-  private static class ViewKey {
-    private String viewName;
-    private int creationDate;
-
-    private ViewKey(String viewName, int creationTime) {
-      this.viewName = viewName;
-      this.creationDate = creationTime;
-    }
-
-    public static ViewKey forTable(Table table) {
-      return new ViewKey(table.getTableName(), table.getCreateTime());
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if(this == obj) {
-        return true;
-      }
-      if((obj == null) || (obj.getClass() != this.getClass())) {
-        return false;
-      }
-      ViewKey viewKey = (ViewKey) obj;
-      return creationDate == viewKey.creationDate && Objects.equals(viewName, viewKey.viewName);
-    }
-
-    @Override
-    public int hashCode() {
-      int hash = 7;
-      hash = 31 * hash + creationDate;
-      hash = 31 * hash + viewName.hashCode();
-      return hash;
-    }
-
-    @Override
-    public String toString() {
-      return "ViewKey{" + viewName + "," + creationDate + "}";
-    }
-  }
-
   private static TableType obtainTableType(Table tabMetaData) {
     if (tabMetaData.getStorageHandler() != null &&
             tabMetaData.getStorageHandler().toString().equals(
@@ -405,4 +385,10 @@ public final class HiveMaterializedViewsRegistry {
     DRUID,
     NATIVE
   }
+
+  private enum OpType {
+    CREATE, // view just being created
+    LOAD    // already created view being loaded
+  }
+
 }
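
The CREATE/LOAD distinction matters because the background loader may race with
a user who has just rebuilt a view: put() lets a fresh CREATE win, while
putIfAbsent() keeps a LOAD from clobbering it. A toy sketch of that policy
(String stands in for RelOptMaterialization):

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  public class RegistryOpTypeSketch {
    enum OpType { CREATE, LOAD }

    static void add(ConcurrentMap<String, String> cq, String view, String plan, OpType opType) {
      if (opType == OpType.CREATE) {
        cq.put(view, plan);          // a fresh CREATE always replaces the entry
      } else {
        cq.putIfAbsent(view, plan);  // a LOAD must not overwrite a newer entry
      }
    }

    public static void main(String[] args) {
      ConcurrentMap<String, String> cq = new ConcurrentHashMap<>();
      add(cq, "mv1", "plan-v2", OpType.CREATE); // user just rebuilt the view
      add(cq, "mv1", "plan-v1", OpType.LOAD);   // startup loader sees an older definition
      System.out.println(cq.get("mv1"));        // plan-v2
    }
  }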

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 0debff6..027265e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
@@ -842,6 +843,21 @@ public class Table implements Serializable {
     tTable.setRewriteEnabled(rewriteEnabled);
   }
 
+  /**
+   * @return the creation metadata (only for materialized views)
+   */
+  public Map<String, BasicTxnInfo> getCreationMetadata() {
+    return tTable.getCreationMetadata();
+  }
+
+  /**
+   * @param creationMetadata
+   *          the creation metadata (only for materialized views)
+   */
+  public void setCreationMetadata(Map<String, BasicTxnInfo> creationMetadata) {
+    tTable.setCreationMetadata(creationMetadata);
+  }
+
   public void clearSerDeInfo() {
     tTable.getSd().getSerdeInfo().getParameters().clear();
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 28b4cfe..572b95f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -89,9 +89,6 @@ import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
 import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
 import org.apache.calcite.rel.rules.ProjectMergeRule;
 import org.apache.calcite.rel.rules.ProjectRemoveRule;
-import org.apache.calcite.rel.rules.SemiJoinFilterTransposeRule;
-import org.apache.calcite.rel.rules.SemiJoinJoinTransposeRule;
-import org.apache.calcite.rel.rules.SemiJoinProjectTransposeRule;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
@@ -112,7 +109,6 @@ import org.apache.calcite.sql.SqlLiteral;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.SqlWindow;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.tools.Frameworks;
@@ -204,8 +200,8 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveReduceExpressionsRu
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveReduceExpressionsWithStatsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRelDecorrelator;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRelFieldTrimmer;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRemoveSqCountCheck;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRemoveGBYSemiJoinRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRemoveSqCountCheck;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSemiJoinRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortJoinReduceRule;
@@ -1498,7 +1494,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // We disable it for CTAS and MV creation queries (trying to avoid any problem
       // due to data freshness)
       if (conf.getBoolVar(ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING) &&
-              !getQB().isMaterializedView() && !getQB().isCTAS()) {
+              !getQB().isMaterializedView() && !ctx.isLoadingMaterializedView() && !getQB().isCTAS()) {
         perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
         // Use Calcite cost model for view rewriting
         RelMetadataProvider calciteMdProvider = DefaultRelMetadataProvider.INSTANCE;
@@ -1507,7 +1503,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         // Add views to planner
         List<RelOptMaterialization> materializations = new ArrayList<>();
         try {
-          materializations = Hive.get().getRewritingMaterializedViews();
+          materializations = Hive.get().getValidMaterializedViews();
           // We need to use the current cluster for the scan operator on views,
           // otherwise the planner will throw an Exception (different planners)
           materializations = Lists.transform(materializations,

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index a1b4616..db86320 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -18,15 +18,29 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
+import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION;
+import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES;
+
+import java.io.FileNotFoundException;
+import java.io.Serializable;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
@@ -52,6 +66,8 @@ import org.apache.hadoop.hive.metastore.api.WMPool;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
 import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
@@ -61,13 +77,14 @@ import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.hooks.Entity.Type;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.hooks.Entity.Type;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
 import org.apache.hadoop.hive.ql.index.HiveIndex;
 import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType;
 import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.lib.Node;
@@ -101,10 +118,11 @@ import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc;
 import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
-import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
 import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc;
 import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
@@ -150,13 +168,12 @@ import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
-import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
-import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
 import org.apache.hadoop.hive.ql.session.LineageState;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
@@ -172,25 +189,11 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.util.StringUtils;
-import java.io.FileNotFoundException;
-import java.io.Serializable;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION;
-import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
 
 /**
  * DDLSemanticAnalyzer.
@@ -4302,9 +4305,22 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     alterMVDesc.setRewriteEnableFlag(enableFlag);
 
     // It can be fully qualified name or use default database
-    Table tab = getTable(mvName, true);
-    inputs.add(new ReadEntity(tab));
-    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
+    Table materializedViewTable = getTable(mvName, true);
+
+    // One last test: if we are enabling the rewrite, we need to check that the
+    // query only uses transactional (MM and ACID) tables
+    if (enableFlag) {
+      for (String tableName : materializedViewTable.getCreationMetadata().keySet()) {
+        Table table = getTable(tableName, true);
+        if (!AcidUtils.isAcidTable(table)) {
+          throw new SemanticException("Automatic rewriting for materialized view cannot "
+              + "be enabled if the materialized view uses non-transactional tables");
+        }
+      }
+    }
+
+    inputs.add(new ReadEntity(materializedViewTable));
+    outputs.add(new WriteEntity(materializedViewTable, WriteEntity.WriteType.DDL_EXCLUSIVE));
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         alterMVDesc), conf));
   }
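
A minimal, self-contained sketch of the guard this hunk adds to ALTER MATERIALIZED VIEW ... ENABLE REWRITE. The BaseTable type and its transactional flag below are hypothetical stand-ins for Hive's Table and AcidUtils.isAcidTable(), not the real API; the sketch only shows the shape of the check, namely that every base table recorded in the view's creation metadata must be transactional before rewriting can be switched on.

  import java.util.Arrays;
  import java.util.List;

  public class EnableRewriteGuard {
    // Hypothetical stand-in for a resolved base table of the view.
    static class BaseTable {
      final String name;
      final boolean transactional; // stands in for AcidUtils.isAcidTable(table)
      BaseTable(String name, boolean transactional) {
        this.name = name;
        this.transactional = transactional;
      }
    }

    // Mirrors the loop over the creation metadata: one non-transactional
    // base table is enough to reject the ALTER.
    static void checkEnableRewrite(List<BaseTable> baseTables) {
      for (BaseTable t : baseTables) {
        if (!t.transactional) {
          throw new IllegalStateException("Automatic rewriting for materialized "
              + "view cannot be enabled if the materialized view uses "
              + "non-transactional tables (offending table: " + t.name + ")");
        }
      }
    }

    public static void main(String[] args) {
      checkEnableRewrite(Arrays.asList(new BaseTable("db.acid_tbl", true)));   // passes
      checkEnableRewrite(Arrays.asList(new BaseTable("db.plain_tbl", false))); // throws
    }
  }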

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 5a88a96..5799b07 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -42,6 +42,7 @@ import java.util.TreeSet;
 import java.util.UUID;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
+
 import org.antlr.runtime.ClassicToken;
 import org.antlr.runtime.CommonToken;
 import org.antlr.runtime.Token;
@@ -71,6 +72,7 @@ import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
@@ -11649,7 +11651,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // all the information for semanticcheck
       validateCreateView();
 
-      if (!createVwDesc.isMaterialized()) {
+      if (createVwDesc.isMaterialized()) {
+        createVwDesc.setTablesUsed(getTablesUsed(pCtx));
+      } else {
         // Since we're only creating a view (not executing it), we don't need to
         // optimize or translate the plan (and in fact, those procedures can
         // interfere with the view creation). So skip the rest of this method.
@@ -11769,7 +11773,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   protected void saveViewDefinition() throws SemanticException {
     if (createVwDesc.isMaterialized() && createVwDesc.isReplace()) {
-      // This is a rebuild, there's nothing to do here.
+      // This is a rebuild, there's nothing to do here
       return;
     }
 
@@ -11888,6 +11892,18 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     createVwDesc.setViewExpandedText(expandedText);
   }
 
+  private List<String> getTablesUsed(ParseContext parseCtx) throws SemanticException {
+    List<String> tablesUsed = new ArrayList<>();
+    for (TableScanOperator topOp : parseCtx.getTopOps().values()) {
+      Table table = topOp.getConf().getTableMetadata();
+      if (!table.isMaterializedTable() && !table.isView()) {
+        // Add to signature
+        tablesUsed.add(table.getFullyQualifiedName());
+      }
+    }
+    return tablesUsed;
+  }
+
   static List<FieldSchema> convertRowSchemaToViewSchema(RowResolver rr) throws SemanticException {
     List<FieldSchema> fieldSchema = convertRowSchemaToResultSetSchema(rr, false);
     ParseUtils.validateColumnNameUniqueness(fieldSchema);
@@ -12784,7 +12800,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     try {
       Table oldView = getTable(createVwDesc.getViewName(), false);
 
-      // Do not allow view to be defined on temp table
+      // Do not allow a view to be defined on a temporary table or another materialized view
       Set<String> tableAliases = qb.getTabAliases();
       for (String alias : tableAliases) {
         try {
@@ -12792,6 +12808,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           if (table.isTemporary()) {
             throw new SemanticException("View definition references temporary table " + alias);
           }
+          if (table.isMaterializedView()) {
+            throw new SemanticException("View definition references materialized view " + alias);
+          }
+          if (createVwDesc.isMaterialized() && createVwDesc.isRewriteEnabled() &&
+              !AcidUtils.isAcidTable(table)) {
+            throw new SemanticException("Automatic rewriting for materialized view cannot "
+                + "be enabled if the materialized view uses non-transactional tables");
+          }
         } catch (HiveException ex) {
           throw new SemanticException(ex);
         }
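
The getTablesUsed() addition above is what feeds the creation signature: it walks the top-level table scans of the view's query and keeps only genuine base tables. A minimal sketch of that filtering, with ScannedTable as a hypothetical stand-in for the metadata hanging off a TableScanOperator (the real code reads it via topOp.getConf().getTableMetadata()):

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  public class TablesUsedCollector {
    // Hypothetical stand-in for the table metadata of one top-level scan.
    static class ScannedTable {
      final String fullyQualifiedName;
      final boolean isView;
      final boolean isMaterializedTable; // an intermediate materialization, not a base table
      ScannedTable(String fqn, boolean isView, boolean isMaterializedTable) {
        this.fullyQualifiedName = fqn;
        this.isView = isView;
        this.isMaterializedTable = isMaterializedTable;
      }
    }

    // Views and intermediate materializations are skipped; only real base
    // tables become part of the materialized view's creation signature.
    static List<String> tablesUsed(List<ScannedTable> topScans) {
      List<String> result = new ArrayList<>();
      for (ScannedTable t : topScans) {
        if (!t.isView && !t.isMaterializedTable) {
          result.add(t.fullyQualifiedName);
        }
      }
      return result;
    }

    public static void main(String[] args) {
      System.out.println(tablesUsed(Arrays.asList(
          new ScannedTable("default.cmv_basetable", false, false),
          new ScannedTable("default.some_view", true, false))));
      // prints: [default.cmv_basetable]
    }
  }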

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
index 09aa82f..3de282d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -66,6 +65,7 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
   private String serde; // only used for materialized views
   private String storageHandler; // only used for materialized views
   private Map<String, String> serdeProps; // only used for materialized views
+  private List<String> tablesUsed;  // only used for materialized views
   private ReplicationSpec replicationSpec = null;
 
   /**
@@ -245,6 +245,14 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
     this.ifNotExists = ifNotExists;
   }
 
+  public List<String> getTablesUsed() {
+    return tablesUsed;
+  }
+
+  public void setTablesUsed(List<String> tablesUsed) {
+    this.tablesUsed = tablesUsed;
+  }
+
   @Explain(displayName = "replace", displayOnlyOnTrue = true)
   public boolean isReplace() {
     return replace;

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
index 33e30bf..3305dfc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
@@ -325,7 +325,9 @@ public class ImportTableDesc {
   /**
    * @return whether this table is actually a view
    */
-  public boolean isView() { return table.isView(); }
+  public boolean isView() {
+    return table.isView();
+  }
 
   public boolean isMaterializedView() {
     return table.isMaterializedView();

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
index 69b076a..21e6984 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
@@ -272,12 +272,13 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
 
         BasicStatsProcessor basicStatsProcessor = new BasicStatsProcessor(p, work, conf, followedColStats);
         basicStatsProcessor.collectFileStatus(wh);
-        Object res = basicStatsProcessor.process(statsAggregator);
-
+        Table res = (Table) basicStatsProcessor.process(statsAggregator);
         if (res == null) {
           return 0;
         }
-        db.alterTable(tableFullName, (Table) res, environmentContext);
+        // Stats task should not set the creation signature
+        res.getTTable().unsetCreationMetadata();
+        db.alterTable(tableFullName, res, environmentContext);
 
         if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
           console.printInfo("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']');
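
The point of the unsetCreationMetadata() call above is that alterTable() ships the whole Table object back to the metastore, so a stats-only update would otherwise overwrite the creation signature recorded when the materialized view was built. A minimal sketch of that unset-before-alter pattern, with TTableStandIn as a hypothetical stand-in for the Thrift-generated table bean:

  public class StatsAlterSketch {
    // Hypothetical stand-in for the Thrift bean with unset semantics.
    static class TTableStandIn {
      private Object creationMetadata; // null means "unset": field is not sent
      void unsetCreationMetadata() { this.creationMetadata = null; }
      boolean isSetCreationMetadata() { return creationMetadata != null; }
    }

    static void alterForStats(TTableStandIn table) {
      // Stats task should not set the creation signature; drop it so the
      // metastore keeps whatever it recorded at CREATE/REBUILD time.
      table.unsetCreationMetadata();
      // ... the trimmed object is what the alterTable call would ship ...
    }

    public static void main(String[] args) {
      TTableStandIn t = new TTableStandIn();
      t.creationMetadata = "signature recorded at CREATE MATERIALIZED VIEW time";
      alterForStats(t);
      System.out.println(t.isSetCreationMetadata()); // false: signature left untouched
    }
  }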

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index bbd285d..bc35882 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -171,6 +172,8 @@ public class TestHive extends TestCase {
 
       tbl.setRewriteEnabled(false);
 
+      tbl.setCreationMetadata(new HashMap<String, BasicTxnInfo>());
+
       // create table
       setNullCreateTableGrants();
       try {
@@ -232,6 +235,8 @@ public class TestHive extends TestCase {
 
       tbl.setRewriteEnabled(false);
 
+      tbl.setCreationMetadata(new HashMap<String, BasicTxnInfo>());
+
       setNullCreateTableGrants();
       try {
         hm.createTable(tbl);

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientnegative/materialized_view_no_transactional_rewrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_no_transactional_rewrite.q b/ql/src/test/queries/clientnegative/materialized_view_no_transactional_rewrite.q
new file mode 100644
index 0000000..bfa0b8f
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_no_transactional_rewrite.q
@@ -0,0 +1,10 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.strict.checks.cartesian.product=false;
+set hive.materializedview.rewriting=true;
+
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2));
+
+insert into cmv_basetable values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+create materialized view cmv_mat_view enable rewrite as select a, b, c from cmv_basetable;

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientnegative/materialized_view_no_transactional_rewrite_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_no_transactional_rewrite_2.q b/ql/src/test/queries/clientnegative/materialized_view_no_transactional_rewrite_2.q
new file mode 100644
index 0000000..9432918
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_no_transactional_rewrite_2.q
@@ -0,0 +1,12 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.strict.checks.cartesian.product=false;
+set hive.materializedview.rewriting=true;
+
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2));
+
+insert into cmv_basetable values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+create materialized view cmv_mat_view as select a, b, c from cmv_basetable;
+
+alter materialized view cmv_mat_view enable rewrite;

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientpositive/druidmini_mv.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druidmini_mv.q b/ql/src/test/queries/clientpositive/druidmini_mv.q
index 284c9c0..e059357 100644
--- a/ql/src/test/queries/clientpositive/druidmini_mv.q
+++ b/ql/src/test/queries/clientpositive/druidmini_mv.q
@@ -1,8 +1,11 @@
+-- SORT_QUERY_RESULTS
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.strict.checks.cartesian.product=false;
 set hive.materializedview.rewriting=true;
-set hive.stats.column.autogather=true;
 
-create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int);
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into cmv_basetable values
  (1, 'alfred', 10.30, 2),

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
index 761903f..a97ce45 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
@@ -1,10 +1,11 @@
 -- SORT_QUERY_RESULTS
 
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.strict.checks.cartesian.product=false;
 set hive.materializedview.rewriting=true;
-set hive.stats.column.autogather=true;
 
-create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int);
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into cmv_basetable values
  (1, 'alfred', 10.30, 2),
@@ -13,6 +14,8 @@ insert into cmv_basetable values
  (3, 'calvin', 978.76, 3),
  (3, 'charlie', 9.8, 1);
 
+analyze table cmv_basetable compute statistics for columns;
+
 create materialized view cmv_mat_view enable rewrite
 as select a, b, c from cmv_basetable where a = 2;
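
The same four `set` commands recur across these qfiles because rewriting against the freshness period only works once the session runs under the DbTxnManager and the base tables are transactional. A minimal sketch of the equivalent programmatic setup, using only the property names visible in the script above (assumes hive-common on the classpath; plain Configuration.set() calls rather than the ConfVars enum):

  import org.apache.hadoop.hive.conf.HiveConf;

  public class RewritingSessionConf {
    public static HiveConf rewritingEnabled() {
      HiveConf conf = new HiveConf();
      conf.set("hive.support.concurrency", "true");
      conf.set("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
      conf.set("hive.strict.checks.cartesian.product", "false");
      conf.set("hive.materializedview.rewriting", "true");
      return conf;
    }
  }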
 

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q
index 9983bae..62aefbc 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q
@@ -1,8 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.strict.checks.cartesian.product=false;
 set hive.materializedview.rewriting=true;
-set hive.stats.column.autogather=true;
 
-create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int);
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into cmv_basetable values
  (1, 'alfred', 10.30, 2),
@@ -11,6 +12,8 @@ insert into cmv_basetable values
  (3, 'calvin', 978.76, 3),
  (3, 'charlie', 9.8, 1);
 
+analyze table cmv_basetable compute statistics for columns;
+
 create materialized view cmv_mat_view enable rewrite
 as select b from cmv_basetable where c > 10.0 group by a, b, c;
 
@@ -47,12 +50,14 @@ select b from cmv_basetable group by b;
 
 select b from cmv_basetable group by b;
 
-create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int);
+create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into cmv_basetable_2 values
  (1, 'alfred', 10.30, 2),
  (3, 'calvin', 978.76, 3);
 
+analyze table cmv_basetable_2 compute statistics for columns;
+
 create materialized view cmv_mat_view_5 enable rewrite
 as select cmv_basetable.a, cmv_basetable_2.c
    from cmv_basetable join cmv_basetable_2 on (cmv_basetable.a = cmv_basetable_2.a)

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_3.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_3.q
index 6462d9a..408e662 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_3.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_3.q
@@ -1,8 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.strict.checks.cartesian.product=false;
 set hive.materializedview.rewriting=true;
-set hive.stats.column.autogather=true;
 
-create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int);
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into cmv_basetable values
  (1, 'alfred', 10.30, 2),
@@ -11,12 +12,16 @@ insert into cmv_basetable values
  (3, 'calvin', 978.76, 3),
  (3, 'charlie', 9.8, 1);
 
-create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int);
+analyze table cmv_basetable compute statistics for columns;
+
+create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into cmv_basetable_2 values
  (1, 'alfred', 10.30, 2),
  (3, 'calvin', 978.76, 3);
 
+analyze table cmv_basetable_2 compute statistics for columns;
+
 EXPLAIN
 CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE AS
   SELECT cmv_basetable.a, cmv_basetable_2.c
@@ -45,7 +50,9 @@ GROUP BY cmv_basetable.a, cmv_basetable_2.c;
 insert into cmv_basetable_2 values
  (3, 'charlie', 15.8, 1);
 
--- TODO: CANNOT USE THE VIEW, IT IS OUTDATED
+analyze table cmv_basetable_2 compute statistics for columns;
+
+-- CANNOT USE THE VIEW, IT IS OUTDATED
 EXPLAIN
 SELECT cmv_basetable.a
 FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -75,4 +82,66 @@ FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
 WHERE cmv_basetable_2.c > 10.10
 GROUP BY cmv_basetable.a, cmv_basetable_2.c;
 
+DELETE FROM cmv_basetable_2 WHERE a = 3;
+
+-- CANNOT USE THE VIEW, IT IS OUTDATED
+EXPLAIN
+SELECT cmv_basetable.a
+FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+SELECT cmv_basetable.a
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+-- REBUILD
+ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+
+-- NOW IT CAN BE USED AGAIN
+EXPLAIN
+SELECT cmv_basetable.a
+FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+SELECT cmv_basetable.a
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+-- IRRELEVANT OPERATIONS
+create table cmv_irrelevant_table (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
+
+insert into cmv_irrelevant_table values
+ (1, 'alfred', 10.30, 2),
+ (3, 'charlie', 9.8, 1);
+
+analyze table cmv_irrelevant_table compute statistics for columns;
+
+-- IT CAN STILL BE USED
+EXPLAIN
+SELECT cmv_basetable.a
+FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+SELECT cmv_basetable.a
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
 drop materialized view cmv_mat_view;
+
+-- NOT USED ANYMORE
+EXPLAIN
+SELECT cmv_basetable.a
+FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+SELECT cmv_basetable.a
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
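
The OUTDATED / REBUILD / IRRELEVANT OPERATIONS sequence above exercises the freshness logic this patch introduces: the metastore remembers, per base table, the last completed transaction at the time the view was created or last rebuilt, and any newer completed transaction on one of those tables invalidates the view until the next REBUILD. A minimal model of that decision, with plain longs as hypothetical stand-ins for the recorded transaction information:

  import java.util.HashMap;
  import java.util.Map;

  public class MvFreshnessSketch {
    // The view is outdated as soon as any base table in its creation
    // signature has a completed transaction newer than the one recorded;
    // tables outside the signature never affect the decision.
    static boolean isOutdated(Map<String, Long> txnAtCreation,
                              Map<String, Long> latestCompletedTxn) {
      for (Map.Entry<String, Long> e : txnAtCreation.entrySet()) {
        long latest = latestCompletedTxn.getOrDefault(e.getKey(), 0L);
        if (latest > e.getValue()) {
          return true; // base table changed after the last (re)build
        }
      }
      return false;
    }

    public static void main(String[] args) {
      Map<String, Long> creation = new HashMap<>();
      creation.put("default.cmv_basetable", 5L);
      creation.put("default.cmv_basetable_2", 7L);
      Map<String, Long> latest = new HashMap<>(creation);
      System.out.println(isOutdated(creation, latest)); // false: can be used
      latest.put("default.cmv_basetable_2", 9L);        // insert/delete committed
      System.out.println(isOutdated(creation, latest)); // true: outdated until REBUILD
      latest.put("default.cmv_irrelevant_table", 42L);  // not in the signature
      System.out.println(isOutdated(creation, latest)); // still decided by signature tables only
    }
  }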

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q
new file mode 100644
index 0000000..efc65c4
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q
@@ -0,0 +1,92 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.strict.checks.cartesian.product=false;
+set hive.materializedview.rewriting=true;
+
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
+
+insert into cmv_basetable values
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1);
+
+analyze table cmv_basetable compute statistics for columns;
+
+create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
+
+insert into cmv_basetable_2 values
+ (1, 'alfred', 10.30, 2),
+ (3, 'calvin', 978.76, 3);
+
+analyze table cmv_basetable_2 compute statistics for columns;
+
+-- CREATE VIEW WITH REWRITE DISABLED
+EXPLAIN
+CREATE MATERIALIZED VIEW cmv_mat_view AS
+  SELECT cmv_basetable.a, cmv_basetable_2.c
+  FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+  WHERE cmv_basetable_2.c > 10.0
+  GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+CREATE MATERIALIZED VIEW cmv_mat_view AS
+  SELECT cmv_basetable.a, cmv_basetable_2.c
+  FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+  WHERE cmv_basetable_2.c > 10.0
+  GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+-- CANNOT USE THE VIEW, IT IS DISABLED FOR REWRITE
+EXPLAIN
+SELECT cmv_basetable.a
+FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+SELECT cmv_basetable.a
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+insert into cmv_basetable_2 values
+ (3, 'charlie', 15.8, 1);
+
+analyze table cmv_basetable_2 compute statistics for columns;
+
+-- ENABLE FOR REWRITE
+EXPLAIN
+ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE;
+
+ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE;
+
+-- CANNOT USE THE VIEW, IT IS OUTDATED
+EXPLAIN
+SELECT cmv_basetable.a
+FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+SELECT cmv_basetable.a
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+-- REBUILD
+EXPLAIN
+ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+
+ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+
+-- NOW IT CAN BE USED AGAIN
+EXPLAIN
+SELECT cmv_basetable.a
+FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+SELECT cmv_basetable.a
+FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+WHERE cmv_basetable_2.c > 10.10
+GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+
+drop materialized view cmv_mat_view;

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q
index e4cdc22..20cf1fc 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q
@@ -1,3 +1,5 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.strict.checks.cartesian.product=false;
 set hive.materializedview.rewriting=true;
 set hive.stats.column.autogather=true;
@@ -5,7 +7,7 @@ set hive.stats.column.autogather=true;
 create database db1;
 use db1;
 
-create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int);
+create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into cmv_basetable values
  (1, 'alfred', 10.30, 2),
@@ -14,6 +16,8 @@ insert into cmv_basetable values
  (3, 'calvin', 978.76, 3),
  (3, 'charlie', 9.8, 1);
 
+analyze table cmv_basetable compute statistics for columns;
+
 create database db2;
 use db2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/57d909c3/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
index 00da517..0177188 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
@@ -1,7 +1,24 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.strict.checks.cartesian.product=false;
 set hive.materializedview.rewriting=true;
 set hive.stats.column.autogather=true;
 
+CREATE TABLE `customer_ext`(
+  `c_custkey` bigint, 
+  `c_name` string, 
+  `c_address` string, 
+  `c_city` string, 
+  `c_nation` string, 
+  `c_region` string, 
+  `c_phone` string, 
+  `c_mktsegment` string)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/ssb/customer/' into table `customer_ext`;
+
 CREATE TABLE `customer`(
   `c_custkey` bigint, 
   `c_name` string, 
@@ -12,7 +29,35 @@ CREATE TABLE `customer`(
   `c_phone` string, 
   `c_mktsegment` string,
   primary key (`c_custkey`) disable rely)
-STORED AS ORC;
+STORED AS ORC
+TBLPROPERTIES ('transactional'='true');
+
+INSERT INTO `customer`
+SELECT * FROM `customer_ext`;
+
+CREATE TABLE `dates_ext`(
+  `d_datekey` bigint, 
+  `d_date` string, 
+  `d_dayofweek` string, 
+  `d_month` string, 
+  `d_year` int, 
+  `d_yearmonthnum` int, 
+  `d_yearmonth` string, 
+  `d_daynuminweek` int,
+  `d_daynuminmonth` int,
+  `d_daynuminyear` int,
+  `d_monthnuminyear` int,
+  `d_weeknuminyear` int,
+  `d_sellingseason` string,
+  `d_lastdayinweekfl` int,
+  `d_lastdayinmonthfl` int,
+  `d_holidayfl` int ,
+  `d_weekdayfl`int)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/ssb/date/' into table `dates_ext`;
 
 CREATE TABLE `dates`(
   `d_datekey` bigint, 
@@ -34,7 +79,27 @@ CREATE TABLE `dates`(
   `d_weekdayfl`int,
   primary key (`d_datekey`) disable rely
 )
-STORED AS ORC;
+STORED AS ORC
+TBLPROPERTIES ('transactional'='true');
+
+INSERT INTO `dates`
+SELECT * FROM `dates_ext`;
+
+CREATE TABLE `ssb_part_ext`(
+  `p_partkey` bigint, 
+  `p_name` string, 
+  `p_mfgr` string, 
+  `p_category` string, 
+  `p_brand1` string, 
+  `p_color` string, 
+  `p_type` string, 
+  `p_size` int, 
+  `p_container` string)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/ssb/part/' into table `ssb_part_ext`;
 
 CREATE TABLE `ssb_part`(
   `p_partkey` bigint, 
@@ -47,7 +112,25 @@ CREATE TABLE `ssb_part`(
   `p_size` int, 
   `p_container` string,
   primary key (`p_partkey`) disable rely)
-STORED AS ORC;
+STORED AS ORC
+TBLPROPERTIES ('transactional'='true');
+
+INSERT INTO `ssb_part`
+SELECT * FROM `ssb_part_ext`;
+
+CREATE TABLE `supplier_ext`(
+  `s_suppkey` bigint, 
+  `s_name` string, 
+  `s_address` string, 
+  `s_city` string, 
+  `s_nation` string, 
+  `s_region` string, 
+  `s_phone` string)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/ssb/supplier/' into table `supplier_ext`;
 
 CREATE TABLE `supplier`(
   `s_suppkey` bigint, 
@@ -58,7 +141,35 @@ CREATE TABLE `supplier`(
   `s_region` string, 
   `s_phone` string,
   primary key (`s_suppkey`) disable rely)
-STORED AS ORC;
+STORED AS ORC
+TBLPROPERTIES ('transactional'='true');
+
+INSERT INTO `supplier`
+SELECT * FROM `supplier_ext`;
+
+CREATE TABLE `lineorder_ext`(
+  `lo_orderkey` bigint, 
+  `lo_linenumber` int, 
+  `lo_custkey` bigint not null disable rely,
+  `lo_partkey` bigint not null disable rely,
+  `lo_suppkey` bigint not null disable rely,
+  `lo_orderdate` bigint not null disable rely,
+  `lo_ordpriority` string, 
+  `lo_shippriority` string, 
+  `lo_quantity` double, 
+  `lo_extendedprice` double, 
+  `lo_ordtotalprice` double, 
+  `lo_discount` double, 
+  `lo_revenue` double, 
+  `lo_supplycost` double, 
+  `lo_tax` double, 
+  `lo_commitdate` bigint, 
+  `lo_shipmode` string)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/ssb/lineorder/' into table `lineorder_ext`;
 
 CREATE TABLE `lineorder`(
   `lo_orderkey` bigint, 
@@ -83,7 +194,11 @@ CREATE TABLE `lineorder`(
   constraint fk2 foreign key (`lo_orderdate`) references `dates`(`d_datekey`) disable rely,
   constraint fk3 foreign key (`lo_partkey`) references `ssb_part`(`p_partkey`) disable rely,
   constraint fk4 foreign key (`lo_suppkey`) references `supplier`(`s_suppkey`) disable rely)
-STORED AS ORC;
+STORED AS ORC
+TBLPROPERTIES ('transactional'='true');
+
+INSERT INTO `lineorder`
+SELECT * FROM `lineorder_ext`;
 
 analyze table customer compute statistics for columns;
 analyze table dates compute statistics for columns;