Posted to commits@hive.apache.org by jc...@apache.org on 2016/09/15 14:18:36 UTC

[4/4] hive git commit: HIVE-14249: Add simple materialized views with manual rebuilds (1) (Alan Gates, reviewed by Jesus Camacho Rodriguez)

HIVE-14249: Add simple materialized views with manual rebuilds (1) (Alan Gates, reviewed by Jesus Camacho Rodriguez)
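
For reference, a sketch of the statements this patch adds (per the HiveParser.g
changes below; mv1 and src are hypothetical names):

    CREATE MATERIALIZED VIEW mv1 AS SELECT key, value FROM src;  -- built at creation time
    ALTER MATERIALIZED VIEW mv1 REBUILD;                         -- manual rebuild
    DROP MATERIALIZED VIEW mv1;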


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/438109cb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/438109cb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/438109cb

Branch: refs/heads/master
Commit: 438109cb90f126e6043533bba060f166bf55c954
Parents: 7f8263e
Author: Alan Gates <ga...@apache.org>
Authored: Wed Jul 20 12:37:31 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu Sep 15 15:14:44 2016 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  13 +-
 .../apache/hive/jdbc/HiveDatabaseMetaData.java  |   2 +
 .../apache/hadoop/hive/metastore/TableType.java |   2 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   3 +
 .../apache/hadoop/hive/ql/QueryProperties.java  |  15 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  75 +++--
 .../apache/hadoop/hive/ql/metadata/Table.java   |   9 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |  12 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |   2 +-
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |   2 +-
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |  27 ++
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |   2 +-
 .../hadoop/hive/ql/parse/ParseContext.java      |  27 +-
 .../org/apache/hadoop/hive/ql/parse/QB.java     |  16 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 203 ++++++++-----
 .../hadoop/hive/ql/parse/StorageFormat.java     |  17 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      | 105 ++++---
 .../ql/parse/UpdateDeleteSemanticAnalyzer.java  |   7 +
 .../hadoop/hive/ql/plan/CreateViewDesc.java     |  54 ++++
 .../hadoop/hive/ql/plan/HiveOperation.java      |   2 +
 .../hadoop/hive/ql/plan/LoadFileDesc.java       |  10 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |  41 +++
 .../authorization/plugin/HiveOperationType.java |   1 +
 .../plugin/sqlstd/Operation2Privilege.java      |   4 +
 ...ialized_view_authorization_create_no_grant.q |  15 +
 ...d_view_authorization_create_no_select_perm.q |  14 +
 ...materialized_view_authorization_drop_other.q |  14 +
 ...rialized_view_authorization_no_select_perm.q |  14 +
 ...alized_view_authorization_rebuild_no_grant.q |  20 ++
 ...erialized_view_authorization_rebuild_other.q |  14 +
 .../clientnegative/materialized_view_delete.q   |  10 +
 .../clientnegative/materialized_view_insert.q   |   6 +
 .../clientnegative/materialized_view_load.q     |   7 +
 .../materialized_view_replace_with_view.q       |   8 +
 .../clientnegative/materialized_view_update.q   |  10 +
 .../test/queries/clientnegative/view_delete.q   |  10 +
 .../test/queries/clientnegative/view_update.q   |  10 +
 .../clientpositive/authorization_view_sqlstd.q  |  86 ------
 .../materialized_view_authorization_sqlstd.q    |  58 ++++
 .../clientpositive/materialized_view_create.q   |  31 ++
 .../clientpositive/materialized_view_drop.q     |   7 +
 .../clientpositive/materialized_view_rebuild.q  |  13 +
 .../clientpositive/view_authorization_sqlstd.q  |  89 ++++++
 ...zed_view_authorization_create_no_grant.q.out |  26 ++
 ...ew_authorization_create_no_select_perm.q.out |  20 ++
 ...rialized_view_authorization_drop_other.q.out |  30 ++
 ...ized_view_authorization_no_select_perm.q.out |  30 ++
 ...ed_view_authorization_rebuild_no_grant.q.out |  42 +++
 ...lized_view_authorization_rebuild_other.q.out |  30 ++
 .../materialized_view_delete.q.out              |  19 ++
 .../materialized_view_insert.q.out              |  19 ++
 .../clientnegative/materialized_view_load.q.out |  19 ++
 .../materialized_view_replace_with_view.q.out   |  30 ++
 .../materialized_view_update.q.out              |  19 ++
 .../results/clientnegative/view_delete.q.out    |  19 ++
 .../results/clientnegative/view_update.q.out    |  19 ++
 .../authorization_view_sqlstd.q.out             | 269 -----------------
 ...materialized_view_authorization_sqlstd.q.out | 195 ++++++++++++
 .../materialized_view_create.q.out              | 173 +++++++++++
 .../clientpositive/materialized_view_drop.q.out |  39 +++
 .../materialized_view_rebuild.q.out             |  67 +++++
 .../view_authorization_sqlstd.q.out             | 295 +++++++++++++++++++
 .../cli/operation/ClassicTableTypeMapping.java  |   5 +
 63 files changed, 1931 insertions(+), 521 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index d6944ee..d68c4fb 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1104,10 +1104,15 @@ public class HiveConf extends Configuration {
     HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC"),
         "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
     HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none",
-  new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
-  "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
-  "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
-  "for all tables."),
+        new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
+        "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
+        "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
+        "for all tables."),
+    HIVEMATERIALIZEDVIEWFILEFORMAT("hive.materializedview.fileformat", "ORC",
+        new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
+        "File format for CREATE MATERIALIZED VIEW statement."),
+    HIVEMATERIALIZEDVIEWSERDE("hive.materializedview.serde",
+        "org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Serde used for materialized views"),
     HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "SequenceFile", new StringSet("TextFile", "SequenceFile", "RCfile", "Llap"),
         "Default file format for storing result of the query."),
     HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
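
A quick sketch of the two new settings, shown here with their defaults from the
hunk above:

    SET hive.materializedview.fileformat=ORC;
    SET hive.materializedview.serde=org.apache.hadoop.hive.ql.io.orc.OrcSerde;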

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
index 9d73470..fa984f4 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
@@ -728,6 +728,8 @@ public class HiveDatabaseMetaData implements DatabaseMetaData {
       return "VIEW";
     } else if (hivetabletype.equals(TableType.EXTERNAL_TABLE.toString())) {
       return "EXTERNAL TABLE";
+    } else if (hivetabletype.equals(TableType.MATERIALIZED_VIEW.toString())) {
+      return "MATERIALIZED VIEW";
     } else {
       return hivetabletype;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java
index 56eeaa0..e9e16d7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TableType.java
@@ -22,5 +22,5 @@ package org.apache.hadoop.hive.metastore;
  * Typesafe enum for types of tables described by the metastore.
  */
 public enum TableType {
-  MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW, INDEX_TABLE
+  MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW, INDEX_TABLE, MATERIALIZED_VIEW
 }
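
With the new enum value, a materialized view is reported as its own table type;
a sketch assuming DESCRIBE FORMATTED output and hypothetical names:

    CREATE MATERIALIZED VIEW mv1 AS SELECT key FROM src;
    DESCRIBE FORMATTED mv1;
    -- Table Type: MATERIALIZED_VIEW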

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 001f852..af1583f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -450,6 +450,9 @@ public enum ErrorMsg {
   ACID_NOT_ENOUGH_HISTORY(10327, "Not enough history available for ({0},{1}).  " +
     "Oldest available base: {2}", true),
   INVALID_COLUMN_NAME(10328, "Invalid column name"),
+  REPLACE_VIEW_WITH_MATERIALIZED(10400, "Attempt to replace view {0} with materialized view", true),
+  REPLACE_MATERIALIZED_WITH_VIEW(10401, "Attempt to replace materialized view {0} with view", true),
+  UPDATE_DELETE_VIEW(10402, "You cannot update or delete records in a view"),
   //========================== 20000 range starts here ========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "
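
A hedged sketch of statements that should trip the new error codes (names
hypothetical; v1 is an existing plain view, mv1 an existing materialized view):

    ALTER MATERIALIZED VIEW v1 REBUILD;                 -- 10400: replacing a view with a materialized view
    CREATE OR REPLACE VIEW mv1 AS SELECT key FROM src;  -- 10401: replacing a materialized view with a view
    UPDATE mv1 SET value = 'x' WHERE key = 0;           -- 10402: no update/delete against views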

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
index 3bc9432..650792b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
@@ -65,6 +65,8 @@ public class QueryProperties {
   private boolean multiDestQuery;
   private boolean filterWithSubQuery;
 
+  // True if this statement creates or replaces a materialized view
+  private boolean isMaterializedView;
 
   public boolean isQuery() {
     return query;
@@ -260,6 +262,19 @@ public class QueryProperties {
     return this.filterWithSubQuery;
   }
 
+  /**
+   * True indicates this statement creates or replaces a materialized view, not that it is a query
+   * against a materialized view.
+   * @return true if this statement creates or replaces a materialized view
+   */
+  public boolean isMaterializedView() {
+    return isMaterializedView;
+  }
+
+  public void setMaterializedView(boolean isMaterializedView) {
+    this.isMaterializedView = isMaterializedView;
+  }
+
   public void clear() {
     query = false;
     analyzeCommand = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 569c19e..68d5fde 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4041,7 +4041,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     // Get the existing table
     Table oldtbl = db.getTable(crtTbl.getLikeTableName());
     Table tbl;
-    if (oldtbl.getTableType() == TableType.VIRTUAL_VIEW) {
+    if (oldtbl.getTableType() == TableType.VIRTUAL_VIEW ||
+        oldtbl.getTableType() == TableType.MATERIALIZED_VIEW) {
       String targetTableName = crtTbl.getTableName();
       tbl=db.newTable(targetTableName);
 
@@ -4187,39 +4188,50 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
     Table oldview = db.getTable(crtView.getViewName(), false);
     if (crtView.getOrReplace() && oldview != null) {
-      // replace existing view
-      // remove the existing partition columns from the field schema
-      oldview.setViewOriginalText(crtView.getViewOriginalText());
-      oldview.setViewExpandedText(crtView.getViewExpandedText());
-      oldview.setFields(crtView.getSchema());
-      if (crtView.getComment() != null) {
-        oldview.setProperty("comment", crtView.getComment());
-      }
-      if (crtView.getTblProps() != null) {
-        oldview.getTTable().getParameters().putAll(crtView.getTblProps());
-      }
-      oldview.setPartCols(crtView.getPartCols());
-      if (crtView.getInputFormat() != null) {
-        oldview.setInputFormatClass(crtView.getInputFormat());
-      }
-      if (crtView.getOutputFormat() != null) {
-        oldview.setOutputFormatClass(crtView.getOutputFormat());
-      }
-      oldview.checkValidity(null);
-      try {
-        db.alterTable(crtView.getViewName(), oldview, null);
-      } catch (InvalidOperationException e) {
-        throw new HiveException(e);
+      if (!crtView.isMaterialized()) {
+        // replace existing view
+        // remove the existing partition columns from the field schema
+        oldview.setViewOriginalText(crtView.getViewOriginalText());
+        oldview.setViewExpandedText(crtView.getViewExpandedText());
+        oldview.setFields(crtView.getSchema());
+        if (crtView.getComment() != null) {
+          oldview.setProperty("comment", crtView.getComment());
+        }
+        if (crtView.getTblProps() != null) {
+          oldview.getTTable().getParameters().putAll(crtView.getTblProps());
+        }
+        oldview.setPartCols(crtView.getPartCols());
+        if (crtView.getInputFormat() != null) {
+          oldview.setInputFormatClass(crtView.getInputFormat());
+        }
+        if (crtView.getOutputFormat() != null) {
+          oldview.setOutputFormatClass(crtView.getOutputFormat());
+        }
+        oldview.checkValidity(null);
+        try {
+          db.alterTable(crtView.getViewName(), oldview, null);
+        } catch (InvalidOperationException e) {
+          throw new HiveException(e);
+        }
+        work.getOutputs().add(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
+      } else {
+        // This is a replace, so we need an exclusive lock
+        work.getOutputs().add(new WriteEntity(oldview, WriteEntity.WriteType.DDL_EXCLUSIVE));
       }
-      work.getOutputs().add(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
     } else {
       // create new view
       Table tbl = db.newTable(crtView.getViewName());
-      tbl.setTableType(TableType.VIRTUAL_VIEW);
+      if (crtView.isMaterialized()) {
+        tbl.setTableType(TableType.MATERIALIZED_VIEW);
+      } else {
+        tbl.setTableType(TableType.VIRTUAL_VIEW);
+      }
       tbl.setSerializationLib(null);
       tbl.clearSerDeInfo();
       tbl.setViewOriginalText(crtView.getViewOriginalText());
-      tbl.setViewExpandedText(crtView.getViewExpandedText());
+      if (!crtView.isMaterialized()) {
+        tbl.setViewExpandedText(crtView.getViewExpandedText());
+      }
       tbl.setFields(crtView.getSchema());
       if (crtView.getComment() != null) {
         tbl.setProperty("comment", crtView.getComment());
@@ -4239,6 +4251,15 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         tbl.setOutputFormatClass(crtView.getOutputFormat());
       }
 
+      if (crtView.isMaterialized()) {
+        // Short-circuit the checks that the input format is valid; this is configured for all
+        // materialized views and doesn't change, so we don't need to check it constantly.
+        tbl.getSd().setInputFormat(crtView.getInputFormat());
+        tbl.getSd().setOutputFormat(crtView.getOutputFormat());
+        tbl.getSd().setSerdeInfo(new SerDeInfo(crtView.getSerde(), crtView.getSerde(),
+                Collections.<String, String>emptyMap()));
+      }
+
       db.createTable(tbl, crtView.getIfNotExists());
       work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index e0d35d3..ea90889 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -209,8 +209,11 @@ public class Table implements Serializable {
     }
 
     if (isView()) {
+      assert (getViewOriginalText() != null);
+      assert (getViewExpandedText() != null);
+    } else if (isMaterializedView()) {
       assert(getViewOriginalText() != null);
-      assert(getViewExpandedText() != null);
+      assert(getViewExpandedText() == null);
     } else {
       assert(getViewOriginalText() == null);
       assert(getViewExpandedText() == null);
@@ -824,6 +827,10 @@ public class Table implements Serializable {
     return TableType.VIRTUAL_VIEW.equals(getTableType());
   }
 
+  public boolean isMaterializedView() {
+    return TableType.MATERIALIZED_VIEW.equals(getTableType());
+  }
+
   /**
    * @return whether this table is actually an index table
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index e0e9b12..dd86abd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -1000,7 +1000,9 @@ public abstract class BaseSemanticAnalyzer {
       assert (ast.getToken().getType() == HiveParser.TOK_TAB
           || ast.getToken().getType() == HiveParser.TOK_TABLE_PARTITION
           || ast.getToken().getType() == HiveParser.TOK_TABTYPE
-          || ast.getToken().getType() == HiveParser.TOK_CREATETABLE);
+          || ast.getToken().getType() == HiveParser.TOK_CREATETABLE
+          || ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW
+          || ast.getToken().getType() == HiveParser.TOK_REBUILD_MATERIALIZED_VIEW);
       int childIndex = 0;
       numDynParts = 0;
 
@@ -1012,7 +1014,9 @@ public abstract class BaseSemanticAnalyzer {
           tableName = conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX)
               + tableName;
         }
-        if (ast.getToken().getType() != HiveParser.TOK_CREATETABLE) {
+        if (ast.getToken().getType() != HiveParser.TOK_CREATETABLE &&
+            ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW &&
+            ast.getToken().getType() != HiveParser.TOK_REBUILD_MATERIALIZED_VIEW) {
           tableHandle = db.getTable(tableName);
         }
       } catch (InvalidTableException ite) {
@@ -1024,7 +1028,9 @@ public abstract class BaseSemanticAnalyzer {
       }
 
       // get partition metadata if partition specified
-      if (ast.getChildCount() == 2 && ast.getToken().getType() != HiveParser.TOK_CREATETABLE) {
+      if (ast.getChildCount() == 2 && ast.getToken().getType() != HiveParser.TOK_CREATETABLE &&
+          ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW &&
+          ast.getToken().getType() != HiveParser.TOK_REBUILD_MATERIALIZED_VIEW) {
         childIndex = 1;
         ASTNode partspec = (ASTNode) ast.getChild(1);
         partitions = new ArrayList<Partition>();

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 9329e00..988c58e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -1160,7 +1160,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       }
     }
 
-    storageFormat.fillDefaultStorageFormat(false);
+    storageFormat.fillDefaultStorageFormat(false, false);
     if (indexTableName == null) {
       indexTableName = MetaStoreUtils.getIndexTableName(qTabName[0], qTabName[1], indexName);
       indexTableName = qTabName[0] + "." + indexTableName; // on same database with base table

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
index a3fcaa0..167f7a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
@@ -127,7 +127,7 @@ public class EximUtil {
   }
 
   static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws SemanticException {
-    if (table.isView()) {
+    if (table.isView() || table.isMaterializedView()) {
       throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
     }
     if (table.isNonNative()) {
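
So exporting a materialized view is now rejected the same way as exporting a
view; e.g. (hypothetical name and path):

    EXPORT TABLE mv1 TO '/tmp/mv1_export';  -- expect DML_AGAINST_VIEW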

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index df596ff..92dfc90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -250,6 +250,8 @@ TOK_ALTERVIEW_DROPPROPERTIES;
 TOK_ALTERVIEW_ADDPARTS;
 TOK_ALTERVIEW_DROPPARTS;
 TOK_ALTERVIEW_RENAME;
+TOK_REBUILD_MATERIALIZED_VIEW;
+TOK_CREATE_MATERIALIZED_VIEW;
 TOK_VIEWPARTCOLS;
 TOK_EXPLAIN;
 TOK_EXPLAIN_SQ_REWRITE;
@@ -784,7 +786,9 @@ ddlStatement
     | showStatement
     | metastoreCheck
     | createViewStatement
+    | createMaterializedViewStatement
     | dropViewStatement
+    | dropMaterializedViewStatement
     | createFunctionStatement
     | createMacroStatement
     | createIndexStatement
@@ -1030,6 +1034,7 @@ alterStatement
 @after { popMsg(state); }
     : KW_ALTER KW_TABLE tableName alterTableStatementSuffix -> ^(TOK_ALTERTABLE tableName alterTableStatementSuffix)
     | KW_ALTER KW_VIEW tableName KW_AS? alterViewStatementSuffix -> ^(TOK_ALTERVIEW tableName alterViewStatementSuffix)
+    | KW_ALTER KW_MATERIALIZED KW_VIEW tableName KW_REBUILD -> ^(TOK_REBUILD_MATERIALIZED_VIEW tableName)
     | KW_ALTER KW_INDEX alterIndexStatementSuffix -> alterIndexStatementSuffix
     | KW_ALTER (KW_DATABASE|KW_SCHEMA) alterDatabaseStatementSuffix -> alterDatabaseStatementSuffix
     ;
@@ -1778,6 +1783,22 @@ createViewStatement
         )
     ;
 
+createMaterializedViewStatement
+@init {
+    pushMsg("create materialized view statement", state);
+}
+@after { popMsg(state); }
+    : KW_CREATE KW_MATERIALIZED KW_VIEW (ifNotExists)? name=tableName
+        tableComment? tablePropertiesPrefixed?
+        KW_AS selectStatementWithCTE
+    -> ^(TOK_CREATE_MATERIALIZED_VIEW $name 
+         ifNotExists?
+         tableComment?
+         tablePropertiesPrefixed?
+         selectStatementWithCTE
+        )
+    ;
+
 viewPartition
 @init { pushMsg("view partition specification", state); }
 @after { popMsg(state); }
@@ -1791,6 +1812,12 @@ dropViewStatement
     : KW_DROP KW_VIEW ifExists? viewName -> ^(TOK_DROPVIEW viewName ifExists?)
     ;
 
+dropMaterializedViewStatement
+@init { pushMsg("drop materialized view statement", state); }
+@after { popMsg(state); }
+    : KW_DROP KW_MATERIALIZED KW_VIEW ifExists? viewName -> ^(TOK_DROPTABLE viewName ifExists?)
+    ;
+
 showFunctionIdentifier
 @init { pushMsg("identifier for show function statement", state); }
 @after { popMsg(state); }
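
Per the new rules above, a materialized view takes only an optional comment and
table properties, with no column list, PARTITIONED ON, or STORED AS clause;
e.g. (names hypothetical):

    CREATE MATERIALIZED VIEW IF NOT EXISTS mv1
      COMMENT 'summary of src, rebuilt manually'
      TBLPROPERTIES ('creator'='gates')
    AS SELECT key, count(*) AS cnt FROM src GROUP BY key;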

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index a49b813..a7005f1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -199,7 +199,7 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
     // initialize destination table/partition
     TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree);
 
-   if (ts.tableHandle.isView()) {
+    if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
       throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
     }
     if (ts.tableHandle.isNonNative()) {
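
LOAD into a materialized view is likewise rejected (covered by the new
materialized_view_load.q test); a sketch with hypothetical names:

    LOAD DATA LOCAL INPATH '/tmp/kv1.txt' INTO TABLE mv1;  -- expect DML_AGAINST_VIEW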

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 4353d3a..35f34da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
+import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc;
@@ -116,6 +117,7 @@ public class ParseContext {
 
   private AnalyzeRewriteContext analyzeRewrite;
   private CreateTableDesc createTableDesc;
+  private CreateViewDesc createViewDesc;
   private boolean reduceSinkAddedBySortedDynPartition;
 
   private Map<SelectOperator, Table> viewProjectToViewSchema;  
@@ -129,20 +131,14 @@ public class ParseContext {
 
   /**
    * @param conf
-   * @param qb
-   *          current QB
-   * @param ast
-   *          current parse tree
    * @param opToPartPruner
    *          map from table scan operator to partition pruner
    * @param opToPartList
    * @param topOps
    *          list of operators for the top query
-   * @param opParseCtx
-   *          operator parse context - contains a mapping from operator to
-   *          operator parse state (row resolver etc.)
    * @param joinOps
    *          context needed join processing (map join specifically)
+   * @param smbMapJoinOps
    * @param loadTableWork
    *          list of destination tables being loaded
    * @param loadFileWork
@@ -154,13 +150,19 @@ public class ParseContext {
    * @param destTableId
    * @param listMapJoinOpsNoReducer
    *          list of map join operators with no reducer
-   * @param groupOpToInputTables
    * @param prunedPartitions
    * @param opToSamplePruner
    *          operator to sample pruner map
    * @param globalLimitCtx
    * @param nameToSplitSample
    * @param rootTasks
+   * @param opToPartToSkewedPruner
+   * @param viewAliasToInput
+   * @param reduceSinkOperatorsAddedByEnforceBucketingSorting
+   * @param analyzeRewrite
+   * @param createTableDesc
+   * @param createViewDesc
+   * @param queryProperties
    */
   public ParseContext(
       QueryState queryState,
@@ -183,8 +185,8 @@ public class ParseContext {
       Map<String, ReadEntity> viewAliasToInput,
       List<ReduceSinkOperator> reduceSinkOperatorsAddedByEnforceBucketingSorting,
       AnalyzeRewriteContext analyzeRewrite, CreateTableDesc createTableDesc,
-      QueryProperties queryProperties, Map<SelectOperator, Table> viewProjectToTableSchema,
-      Set<FileSinkDesc> acidFileSinks) {
+      CreateViewDesc createViewDesc, QueryProperties queryProperties,
+      Map<SelectOperator, Table> viewProjectToTableSchema, Set<FileSinkDesc> acidFileSinks) {
     this.queryState = queryState;
     this.conf = queryState.getConf();
     this.opToPartPruner = opToPartPruner;
@@ -213,6 +215,7 @@ public class ParseContext {
         reduceSinkOperatorsAddedByEnforceBucketingSorting;
     this.analyzeRewrite = analyzeRewrite;
     this.createTableDesc = createTableDesc;
+    this.createViewDesc = createViewDesc;
     this.queryProperties = queryProperties;
     this.viewProjectToViewSchema = viewProjectToTableSchema;
     this.needViewColumnAuthorization = viewProjectToTableSchema != null
@@ -582,6 +585,10 @@ public class ParseContext {
     this.createTableDesc = createTableDesc;
   }
 
+  public CreateViewDesc getCreateViewDesc() {
+    return createViewDesc;
+  }
+
   public void setReduceSinkAddedBySortedDynPartition(
       final boolean reduceSinkAddedBySortedDynPartition) {
     this.reduceSinkAddedBySortedDynPartition = reduceSinkAddedBySortedDynPartition;

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
index 7e732f3..59d537f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
@@ -32,6 +32,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
+import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 
 /**
  * Implementation of the query block.
@@ -63,6 +64,9 @@ public class QB {
   private boolean insideView;
   private Set<String> aliasInsideView;
 
+  // If this is a materialized view, this stores the view descriptor
+  private CreateViewDesc viewDesc;
+
   // used by PTFs
   /*
    * This map maintains the PTFInvocationSpec for each PTF chain invocation in this QB.
@@ -404,6 +408,18 @@ public class QB {
     return havingClauseSubQueryPredicate;
   }
 
+  public CreateViewDesc getViewDesc() {
+    return viewDesc;
+  }
+
+  public void setViewDesc(CreateViewDesc viewDesc) {
+    this.viewDesc = viewDesc;
+  }
+
+  public boolean isMaterializedView() {
+    return viewDesc != null && viewDesc.isMaterialized();
+  }
+
   void addEncryptedTargetTablePath(Path p) {
     if(encryptedTargetTablePaths == null) {
       encryptedTargetTablePaths = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index b071ddf..ab2966b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -469,7 +469,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject,
         opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks,
         opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
-        analyzeRewrite, tableDesc, queryProperties, viewProjectToTableSchema, acidFileSinks);
+        analyzeRewrite, tableDesc, createVwDesc, queryProperties, viewProjectToTableSchema, acidFileSinks);
   }
 
   public CompilationOpContext getOpContext() {
@@ -2060,7 +2060,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       switch (ast.getToken().getType()) {
         case HiveParser.TOK_TAB: {
           TableSpec ts = new TableSpec(db, conf, ast);
-          if (ts.tableHandle.isView()) {
+          if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
             throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
           }
 
@@ -2098,7 +2098,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           if ((!qb.getParseInfo().getIsSubQ())
               && (((ASTNode) ast.getChild(0)).getToken().getType() == HiveParser.TOK_TMP_FILE)) {
 
-            if (qb.isCTAS()) {
+            if (qb.isCTAS() || qb.isMaterializedView()) {
               qb.setIsQuery(false);
               ctx.setResDir(null);
               ctx.setResFile(null);
@@ -4403,12 +4403,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   /**
    * Returns the GenericUDAFInfo struct for the aggregation.
    *
-   * @param aggName
-   *          The name of the UDAF.
+   * @param evaluator
+   * @param emode
    * @param aggParameters
    *          The exprNodeDesc of the original parameters
-   * @param aggTree
-   *          The ASTNode node of the UDAF in the query.
    * @return GenericUDAFInfo
    * @throws SemanticException
    *           when the UDAF is not found or has problems.
@@ -4714,6 +4712,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * Generate the GroupByOperator for the Query Block (parseInfo.getXXX(dest)).
    * The new GroupByOperator will be a child of the reduceSinkOperatorInfo.
    *
+   * @param parseInfo
+   * @param dest
+   * @param reduceSinkOperatorInfo
    * @param mode
    *          The mode of the aggregation (MERGEPARTIAL, PARTIAL2)
    * @param genericUDAFEvaluators
@@ -4723,7 +4724,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    *          list of grouping sets
    * @param groupingSetsPresent
    *          whether grouping sets are present in this query
-   * @param groupingSetsConsumedCurrentMR
+   * @param groupingSetsNeedAdditionalMRJob
    *          whether grouping sets are consumed by this group by
    * @return the new GroupByOperator
    */
@@ -6796,10 +6797,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // table command rather than taking the default value
       List<FieldSchema> field_schemas = null;
       CreateTableDesc tblDesc = qb.getTableDesc();
+      CreateViewDesc viewDesc = qb.getViewDesc();
       if (tblDesc != null) {
         field_schemas = new ArrayList<FieldSchema>();
         destTableIsTemporary = tblDesc.isTemporary();
         destTableIsMaterialization = tblDesc.isMaterialization();
+      } else if (viewDesc != null) {
+        field_schemas = new ArrayList<FieldSchema>();
+        destTableIsTemporary = false;
       }
 
       boolean first = true;
@@ -6858,6 +6863,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // update the create table descriptor with the resulting schema.
       if (tblDesc != null) {
         tblDesc.setCols(new ArrayList<FieldSchema>(field_schemas));
+      } else if (viewDesc != null) {
+        viewDesc.setSchema(new ArrayList<FieldSchema>(field_schemas));
       }
 
       boolean isDestTempFile = true;
@@ -6869,11 +6876,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
 
       boolean isDfsDir = (dest_type.intValue() == QBMetaData.DEST_DFS_FILE);
-      loadFileWork.add(new LoadFileDesc(tblDesc, queryTmpdir, dest_path, isDfsDir, cols,
+      loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, dest_path, isDfsDir, cols,
           colTypes));
 
       if (tblDesc == null) {
-        if (qb.getIsQuery()) {
+        if (viewDesc != null) {
+          table_desc = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
+        } else if (qb.getIsQuery()) {
           String fileFormat;
           if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) {
               fileFormat = "SequenceFile";
@@ -10434,7 +10443,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    *
    * @param source
    * @param dest
-   * @param outputColNames
+   * @param colExprMap
+   * @param outputInternalColNames
    *          - a list to which the new internal column names will be added, in
    *          the same order as in the dest row resolver
    */
@@ -10687,10 +10697,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     // 3. analyze create view command
-    if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW
-        || (ast.getToken().getType() == HiveParser.TOK_ALTERVIEW && ast.getChild(1).getType() == HiveParser.TOK_QUERY)) {
+    if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW ||
+        ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW ||
+        ast.getToken().getType() == HiveParser.TOK_REBUILD_MATERIALIZED_VIEW ||
+        (ast.getToken().getType() == HiveParser.TOK_ALTERVIEW &&
+            ast.getChild(1).getType() == HiveParser.TOK_QUERY)) {
       child = analyzeCreateView(ast, qb);
-      queryState.setCommandType(HiveOperation.CREATEVIEW);
       if (child == null) {
         return false;
       }
@@ -10835,7 +10847,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner,
         globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner,
         viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
-        analyzeRewrite, tableDesc, queryProperties, viewProjectToTableSchema, acidFileSinks);
+        analyzeRewrite, tableDesc, createVwDesc, queryProperties, viewProjectToTableSchema, acidFileSinks);
 
     // 5. Take care of view creation
     if (createVwDesc != null) {
@@ -10846,32 +10858,34 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
       // validate the create view statement at this point, the createVwDesc gets
       // all the information for semanticcheck
-      validateCreateView(createVwDesc);
+      validateCreateView();
 
-      // Since we're only creating a view (not executing it), we don't need to
-      // optimize or translate the plan (and in fact, those procedures can
-      // interfere with the view creation). So skip the rest of this method.
-      ctx.setResDir(null);
-      ctx.setResFile(null);
+      if (!createVwDesc.isMaterialized()) {
+        // Since we're only creating a view (not executing it), we don't need to
+        // optimize or translate the plan (and in fact, those procedures can
+        // interfere with the view creation). So skip the rest of this method.
+        ctx.setResDir(null);
+        ctx.setResFile(null);
 
-      try {
-        PlanUtils.addInputsForView(pCtx);
-      } catch (HiveException e) {
-        throw new SemanticException(e);
-      }
+        try {
+          PlanUtils.addInputsForView(pCtx);
+        } catch (HiveException e) {
+          throw new SemanticException(e);
+        }
 
-      // Generate lineage info for create view statements
-      // if LineageLogger hook is configured.
-      if (HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS).contains(
-          "org.apache.hadoop.hive.ql.hooks.LineageLogger")) {
-        ArrayList<Transform> transformations = new ArrayList<Transform>();
-        transformations.add(new HiveOpConverterPostProc());
-        transformations.add(new Generator());
-        for (Transform t : transformations) {
-          pCtx = t.transform(pCtx);
+        // Generate lineage info for create view statements
+        // if LineageLogger hook is configured.
+        if (HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS).contains(
+            "org.apache.hadoop.hive.ql.hooks.LineageLogger")) {
+          ArrayList<Transform> transformations = new ArrayList<Transform>();
+          transformations.add(new HiveOpConverterPostProc());
+          transformations.add(new Generator());
+          for (Transform t : transformations) {
+            pCtx = t.transform(pCtx);
+          }
         }
+        return;
       }
-      return;
     }
 
     // 6. Generate table access stats if required
@@ -11002,6 +11016,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   private void saveViewDefinition() throws SemanticException {
 
+    if (createVwDesc.isMaterialized() && createVwDesc.getOrReplace()) {
+      // This is a rebuild; there's nothing to do here.
+      return;
+    }
+
     // Make a copy of the statement's result schema, since we may
     // modify it below as part of imposing view column names.
     List<FieldSchema> derivedSchema =
@@ -11112,7 +11131,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     createVwDesc.setSchema(derivedSchema);
-    createVwDesc.setViewExpandedText(expandedText);
+    if (!createVwDesc.isMaterialized()) {
+      // materialized views don't store the expanded text as they won't be rewritten at query time.
+      createVwDesc.setViewExpandedText(expandedText);
+    }
   }
 
   static List<FieldSchema> convertRowSchemaToViewSchema(RowResolver rr) throws SemanticException {
@@ -11496,8 +11518,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     RowFormatParams rowFormatParams = new RowFormatParams();
     StorageFormat storageFormat = new StorageFormat(conf);
 
-    LOG.info("Creating table " + dbDotTab + " position="
-        + ast.getCharPositionInLine());
+    LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine());
     int numCh = ast.getChildCount();
 
     /*
@@ -11539,6 +11560,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           command_type = CTLT;
         }
         break;
+
       case HiveParser.TOK_QUERY: // CTAS
         if (command_type == CTLT) {
           throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
@@ -11629,7 +11651,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         throw new SemanticException("Unrecognized command.");
     }
 
-    storageFormat.fillDefaultStorageFormat(isExt);
+    storageFormat.fillDefaultStorageFormat(isExt, false);
 
     if ((command_type == CTAS) && (storageFormat.getStorageHandler() != null)) {
       throw new SemanticException(ErrorMsg.CREATE_NON_NATIVE_AS.getMsg());
@@ -11782,6 +11804,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       qb.setTableDesc(tableDesc);
 
       return selectStmt;
+
     default:
       throw new SemanticException("Unrecognized command.");
     }
@@ -11797,8 +11820,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     outputs.add(new WriteEntity(t, WriteEntity.WriteType.DDL_NO_LOCK));
   }
 
-  private ASTNode analyzeCreateView(ASTNode ast, QB qb)
-      throws SemanticException {
+  private ASTNode analyzeCreateView(ASTNode ast, QB qb) throws SemanticException {
     String[] qualTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
     String dbDotTable = getDotName(qualTabName);
     List<FieldSchema> cols = null;
@@ -11809,6 +11831,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     ASTNode selectStmt = null;
     Map<String, String> tblProps = null;
     List<String> partColNames = null;
+    boolean isRebuild = ast.getToken().getType() == HiveParser.TOK_REBUILD_MATERIALIZED_VIEW;
+    boolean isMaterialized = ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW
+        || isRebuild;
+    StorageFormat storageFormat = new StorageFormat(conf);
+    storageFormat.fillDefaultStorageFormat(false, isMaterialized);
 
     LOG.info("Creating view " + dbDotTable + " position="
         + ast.getCharPositionInLine());
@@ -11852,18 +11879,40 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       orReplace = true;
     }
 
-    StorageFormat defaultFmt = new StorageFormat(conf);
-    defaultFmt.fillDefaultStorageFormat(false);
-    createVwDesc = new CreateViewDesc(
-      dbDotTable, cols, comment, defaultFmt.getInputFormat(),
-      defaultFmt.getOutputFormat(), tblProps, partColNames,
-      ifNotExists, orReplace, isAlterViewAs);
-
     unparseTranslator.enable();
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        createVwDesc), conf));
 
-    addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW);
+    if (isMaterialized) {
+      createVwDesc = new CreateViewDesc(dbDotTable, cols, comment, tblProps, partColNames,
+              ifNotExists, orReplace || isRebuild, isAlterViewAs, storageFormat.getInputFormat(),
+              storageFormat.getOutputFormat(), storageFormat.getSerde());
+      addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW);
+      queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW);
+      qb.setViewDesc(createVwDesc);
+    } else {
+      createVwDesc = new CreateViewDesc(
+              dbDotTable, cols, comment, tblProps, partColNames,
+              ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(),
+              storageFormat.getOutputFormat(), storageFormat.getSerde());
+      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+              createVwDesc), conf));
+
+      addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW);
+      queryState.setCommandType(HiveOperation.CREATEVIEW);
+    }
+
+    if (isRebuild) {
+      // We need to go lookup the table and get the select statement and then parse it.
+      try {
+        Table tab = db.getTable(qualTabName[0], qualTabName[1]);
+        String viewText = tab.getViewOriginalText();
+        ParseDriver pd = new ParseDriver();
+        ASTNode tree = pd.parse(viewText, ctx, false);
+        selectStmt = ParseUtils.findRootNonNullToken(tree);
+      } catch (Exception e) {
+        throw new SemanticException(e);
+      }
+    }
+
     return selectStmt;
   }
 
@@ -11874,7 +11923,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   // validate the create view statement
   // the statement could be CREATE VIEW, REPLACE VIEW, or ALTER VIEW AS SELECT
   // check semantic conditions
-  private void validateCreateView(CreateViewDesc createVwDesc)
+  private void validateCreateView()
     throws SemanticException {
     try {
       Table oldView = getTable(createVwDesc.getViewName(), false);
@@ -11903,8 +11952,21 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       //replace view
       if (createVwDesc.getOrReplace() && oldView != null) {
 
+        // Don't allow swapping between virtual and materialized view in replace
+        if (oldView.getTableType().equals(TableType.VIRTUAL_VIEW) && createVwDesc.isMaterialized()) {
+          throw new SemanticException(ErrorMsg.REPLACE_VIEW_WITH_MATERIALIZED,
+              oldView.getTableName());
+        }
+
+        if (oldView.getTableType().equals(TableType.MATERIALIZED_VIEW) &&
+            !createVwDesc.isMaterialized()) {
+          throw new SemanticException(ErrorMsg.REPLACE_MATERIALIZED_WITH_VIEW,
+              oldView.getTableName());
+        }
+
         // Existing table is not a view
-        if (!oldView.getTableType().equals(TableType.VIRTUAL_VIEW)) {
+        if (!oldView.getTableType().equals(TableType.VIRTUAL_VIEW) &&
+            !oldView.getTableType().equals(TableType.MATERIALIZED_VIEW)) {
           String tableNotViewErrorMsg =
             "The following is an existing table, not a view: " +
             createVwDesc.getViewName();
@@ -11912,26 +11974,28 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             ErrorMsg.EXISTING_TABLE_IS_NOT_VIEW.getMsg(tableNotViewErrorMsg));
         }
 
-        // if old view has partitions, it could not be replaced
-        String partitionViewErrorMsg =
-          "The following view has partition, it could not be replaced: " +
-          createVwDesc.getViewName();
-        try {
-          if ((createVwDesc.getPartCols() == null ||
-            createVwDesc.getPartCols().isEmpty() ||
-            !createVwDesc.getPartCols().equals(oldView.getPartCols())) &&
-            !oldView.getPartCols().isEmpty() &&
-            !db.getPartitions(oldView).isEmpty()) {
+        if (!createVwDesc.isMaterialized()) {
+          // if old view has partitions, it could not be replaced
+          String partitionViewErrorMsg =
+              "The following view has partition, it could not be replaced: " +
+                  createVwDesc.getViewName();
+          try {
+            if ((createVwDesc.getPartCols() == null ||
+                createVwDesc.getPartCols().isEmpty() ||
+                !createVwDesc.getPartCols().equals(oldView.getPartCols())) &&
+                !oldView.getPartCols().isEmpty() &&
+                !db.getPartitions(oldView).isEmpty()) {
+              throw new SemanticException(
+                  ErrorMsg.REPLACE_VIEW_WITH_PARTITION.getMsg(partitionViewErrorMsg));
+            }
+          } catch (HiveException e) {
             throw new SemanticException(
-              ErrorMsg.REPLACE_VIEW_WITH_PARTITION.getMsg(partitionViewErrorMsg));
+                ErrorMsg.REPLACE_VIEW_WITH_PARTITION.getMsg(partitionViewErrorMsg));
           }
-        } catch (HiveException e) {
-          throw new SemanticException(
-            ErrorMsg.REPLACE_VIEW_WITH_PARTITION.getMsg(partitionViewErrorMsg));
         }
       }
     } catch (HiveException e) {
-      throw new SemanticException(e.getMessage());
+      throw new SemanticException(e.getMessage(), e);
     }
   }
 
@@ -13040,6 +13104,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       queryProperties.setHasOuterOrderBy(!qb.getParseInfo().getIsSubQ() &&
               !qb.getParseInfo().getDestToOrderBy().isEmpty());
       queryProperties.setOuterQueryLimit(qb.getParseInfo().getOuterQueryLimit());
+      queryProperties.setMaterializedView(qb.getViewDesc() != null);
     }
   }
   private void warn(String msg) {

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java
index 48aca4d..d3b955c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java
@@ -104,13 +104,22 @@ public class StorageFormat {
     }
   }
 
-  protected void fillDefaultStorageFormat(boolean isExternal) throws SemanticException {
+  protected void fillDefaultStorageFormat(boolean isExternal, boolean isMaterializedView)
+      throws SemanticException {
     if ((inputFormat == null) && (storageHandler == null)) {
-      String defaultFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT);
-      String defaultManagedFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT);
+      String defaultFormat;
+      String defaultManagedFormat;
+      if (isMaterializedView) {
+        defaultFormat = defaultManagedFormat =
+            HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMATERIALIZEDVIEWFILEFORMAT);
+        serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMATERIALIZEDVIEWSERDE);
+      } else {
+        defaultFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT);
+        defaultManagedFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT);
+      }
 
       if (!isExternal && !"none".equals(defaultManagedFormat)) {
-	defaultFormat = defaultManagedFormat;
+        defaultFormat = defaultManagedFormat;
       }
 
       if (StringUtils.isBlank(defaultFormat)) {
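
Since the CREATE MATERIALIZED VIEW syntax carries no STORED AS clause, these
settings are the way to change the storage of new materialized views; a hedged
example pairing TextFile with LazySimpleSerDe:

    SET hive.materializedview.fileformat=TextFile;
    SET hive.materializedview.serde=org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
    CREATE MATERIALIZED VIEW mv_text AS SELECT key FROM src;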

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index fb2b992..f781390 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContex
 import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsWork;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
+import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
@@ -76,8 +77,6 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.thrift.ThriftFormatter;
 import org.apache.hadoop.hive.serde2.thrift.ThriftJDBCBinarySerDe;
 
-import akka.util.Collections;
-
 import com.google.common.collect.Interner;
 import com.google.common.collect.Interners;
 
@@ -226,18 +225,25 @@ public abstract class TaskCompiler {
 
       boolean oneLoadFile = true;
       for (LoadFileDesc lfd : loadFileWork) {
-        if (pCtx.getQueryProperties().isCTAS()) {
+        if (pCtx.getQueryProperties().isCTAS() || pCtx.getQueryProperties().isMaterializedView()) {
           assert (oneLoadFile); // should not have more than 1 load file for
           // CTAS
           // make the movetask's destination directory the table's destination.
           Path location;
-          String loc = pCtx.getCreateTable().getLocation();
+          String loc = pCtx.getQueryProperties().isCTAS() ?
+                  pCtx.getCreateTable().getLocation() : null;
           if (loc == null) {
             // get the table's default location
+            // Always use the default location for materialized views
             Path targetPath;
             try {
-              String[] names = Utilities.getDbTableName(
-                      pCtx.getCreateTable().getTableName());
+              String protoName = null;
+              if (pCtx.getQueryProperties().isCTAS()) {
+                protoName = pCtx.getCreateTable().getTableName();
+              } else if (pCtx.getQueryProperties().isMaterializedView()) {
+                protoName = pCtx.getCreateViewDesc().getViewName();
+              }
+              String[] names = Utilities.getDbTableName(protoName);
               if (!db.databaseExists(names[0])) {
                 throw new SemanticException("ERROR: The database " + names[0]
                     + " does not exist.");
@@ -314,41 +320,15 @@ public abstract class TaskCompiler {
 
       crtTblDesc.validate(conf);
 
-      // clear the mapredWork output file from outputs for CTAS
-      // DDLWork at the tail of the chain will have the output
-      Iterator<WriteEntity> outIter = outputs.iterator();
-      while (outIter.hasNext()) {
-        switch (outIter.next().getType()) {
-        case DFS_DIR:
-        case LOCAL_DIR:
-          outIter.remove();
-          break;
-        default:
-          break;
-        }
-      }
       Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(
           inputs, outputs, crtTblDesc), conf);
-
-      // find all leaf tasks and make the DDLTask as a dependent task of all of
-      // them
-      HashSet<Task<? extends Serializable>> leaves = new LinkedHashSet<Task<? extends Serializable>>();
-      getLeafTasks(rootTasks, leaves);
-      assert (leaves.size() > 0);
-      for (Task<? extends Serializable> task : leaves) {
-        if (task instanceof StatsTask) {
-          // StatsTask require table to already exist
-          for (Task<? extends Serializable> parentOfStatsTask : task.getParentTasks()) {
-            parentOfStatsTask.addDependentTask(crtTblTask);
-          }
-          for (Task<? extends Serializable> parentOfCrtTblTask : crtTblTask.getParentTasks()) {
-            parentOfCrtTblTask.removeDependentTask(task);
-          }
-          crtTblTask.addDependentTask(task);
-        } else {
-          task.addDependentTask(crtTblTask);
-        }
-      }
+      patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask);
+    } else if (pCtx.getQueryProperties().isMaterializedView()) {
+      // generate a DDL task and make it a dependent task of the leaf
+      CreateViewDesc viewDesc = pCtx.getCreateViewDesc();
+      Task<? extends Serializable> crtViewTask = TaskFactory.get(new DDLWork(
+          inputs, outputs, viewDesc), conf);
+      patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask);
     }
 
     if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) {
@@ -376,6 +356,44 @@ public abstract class TaskCompiler {
     }
   }
 
+  private void patchUpAfterCTASorMaterializedView(final List<Task<? extends Serializable>> rootTasks,
+                                                  final HashSet<WriteEntity> outputs,
+                                                  Task<? extends Serializable> createTask) {
+    // clear the mapredWork output file from outputs for CTAS and materialized
+    // views; the DDLWork at the tail of the chain will have the output
+    Iterator<WriteEntity> outIter = outputs.iterator();
+    while (outIter.hasNext()) {
+      switch (outIter.next().getType()) {
+      case DFS_DIR:
+      case LOCAL_DIR:
+        outIter.remove();
+        break;
+      default:
+        break;
+      }
+    }
+
+    // find all leaf tasks and make the DDLTask a dependent task of all of
+    // them
+    HashSet<Task<? extends Serializable>> leaves =
+        new LinkedHashSet<Task<? extends Serializable>>();
+    getLeafTasks(rootTasks, leaves);
+    assert (leaves.size() > 0);
+    for (Task<? extends Serializable> task : leaves) {
+      if (task instanceof StatsTask) {
+        // StatsTask requires the table to already exist
+        for (Task<? extends Serializable> parentOfStatsTask : task.getParentTasks()) {
+          parentOfStatsTask.addDependentTask(createTask);
+        }
+        for (Task<? extends Serializable> parentOfCreateTask : createTask.getParentTasks()) {
+          parentOfCreateTask.removeDependentTask(task);
+        }
+        createTask.addDependentTask(task);
+      } else {
+        task.addDependentTask(createTask);
+      }
+    }
+  }
 
   /**
    * A helper function to generate a column stats task on top of map-red task. The column stats
@@ -385,7 +403,11 @@ public abstract class TaskCompiler {
    * This method generates a plan with a column stats task on top of map-red task and sets up the
    * appropriate metadata to be used during execution.
    *
-   * @param qb
+   * @param analyzeRewrite
+   * @param loadTableWork
+   * @param loadFileWork
+   * @param rootTasks
+   * @param outerQueryLimit
    */
   @SuppressWarnings("unchecked")
   protected void genColumnStatsTask(AnalyzeRewriteContext analyzeRewrite,
@@ -494,7 +516,8 @@ public abstract class TaskCompiler {
         pCtx.getNameToSplitSample(), pCtx.getSemanticInputs(), rootTasks,
         pCtx.getOpToPartToSkewedPruner(), pCtx.getViewAliasToInput(),
         pCtx.getReduceSinkOperatorsAddedByEnforceBucketingSorting(),
-        pCtx.getAnalyzeRewrite(), pCtx.getCreateTable(), pCtx.getQueryProperties(), pCtx.getViewProjectToTableSchema(),
+        pCtx.getAnalyzeRewrite(), pCtx.getCreateTable(), pCtx.getCreateViewDesc(),
+        pCtx.getQueryProperties(), pCtx.getViewProjectToTableSchema(),
         pCtx.getAcidSinks());
     clone.setFetchTask(pCtx.getFetchTask());
     clone.setLineageInfo(pCtx.getLineageInfo());

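For reference, the shared patch-up means both of the following statements now
take the same compilation path; only the CTAS form can carry an explicit
LOCATION, so a materialized view always lands in its default warehouse
directory (table and view names below are illustrative only):

  -- both are routed through patchUpAfterCTASorMaterializedView
  create table ctas_target as select a, c from base_table;
  create materialized view mv_target as select a, c from base_table;
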
http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
index 34d83ef..5b874e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -141,6 +142,12 @@ public class UpdateDeleteSemanticAnalyzer extends SemanticAnalyzer {
       throw new SemanticException(e.getMessage(), e);
     }
 
+    if (mTable.getTableType() == TableType.VIRTUAL_VIEW ||
+        mTable.getTableType() == TableType.MATERIALIZED_VIEW) {
+      LOG.error("Table " + getDotName(tableName) + " is a view or materialized view");
+      throw new SemanticException(ErrorMsg.UPDATE_DELETE_VIEW.getMsg());
+    }
+
     List<FieldSchema> partCols = mTable.getPartCols();
     List<String> bucketingCols = mTable.getBucketCols();
 

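With this guard in place, UPDATE and DELETE fail at analysis time with
UPDATE_DELETE_VIEW for both virtual and materialized views; the illustrative
statements below mirror the negative tests added later in this patch:

  -- both are rejected before any task is compiled
  update some_mat_view set b = 'joe' where b = 'fred';
  delete from some_view where b = 'fred';
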
http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
index 81c4f77..3ddbb1f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
@@ -48,6 +48,8 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
   private boolean ifNotExists;
   private boolean orReplace;
   private boolean isAlterViewAs;
+  private boolean isMaterialized;
+  private String serde; // only used for materialized views
 
   /**
    * For serialization only.
@@ -55,6 +57,17 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
   public CreateViewDesc() {
   }
 
+  /**
+   * Used to create a virtual view descriptor.
+   * @param viewName
+   * @param schema
+   * @param comment
+   * @param inputFormat
+   * @param outputFormat
+   * @param tblProps
+   * @param partColNames
+   * @param ifNotExists
+   * @param orReplace
+   * @param isAlterViewAs
+   */
   public CreateViewDesc(String viewName, List<FieldSchema> schema,
       String comment, String inputFormat,
       String outputFormat, Map<String, String> tblProps,
@@ -70,6 +83,39 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
     this.ifNotExists = ifNotExists;
     this.orReplace = orReplace;
     this.isAlterViewAs = isAlterViewAs;
+    this.isMaterialized = false;
+  }
+
+  /**
+   * Used to create a materialized view descriptor.
+   * @param viewName
+   * @param schema
+   * @param comment
+   * @param tblProps
+   * @param partColNames
+   * @param ifNotExists
+   * @param orReplace
+   * @param isAlterViewAs
+   * @param inputFormat
+   * @param outputFormat
+   * @param serde
+   */
+  public CreateViewDesc(String viewName, List<FieldSchema> schema, String comment,
+                        Map<String, String> tblProps, List<String> partColNames,
+                        boolean ifNotExists, boolean orReplace, boolean isAlterViewAs,
+                        String inputFormat, String outputFormat, String serde) {
+    this.viewName = viewName;
+    this.schema = schema;
+    this.tblProps = tblProps;
+    this.partColNames = partColNames;
+    this.comment = comment;
+    this.ifNotExists = ifNotExists;
+    this.orReplace = orReplace;
+    this.isAlterViewAs = isAlterViewAs;
+    this.isMaterialized = true;
+    this.inputFormat = inputFormat;
+    this.outputFormat = outputFormat;
+    this.serde = serde;
   }
 
   @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -194,4 +240,12 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
     this.outputFormat = outputFormat;
   }
 
+  public boolean isMaterialized() {
+    return isMaterialized;
+  }
+
+  public String getSerde() {
+    return serde;
+  }
+
 }

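The new constructor is the one a CREATE MATERIALIZED VIEW statement compiles
to: it sets isMaterialized and records the storage format, while the original
constructor keeps isMaterialized false for virtual views. A minimal statement
that would exercise it (illustrative names, default storage format assumed):

  create materialized view mv1 as select a, c from base_table;
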
http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index d2746ae..d5b1c9d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -78,6 +78,8 @@ public enum HiveOperation {
   CREATEMACRO("CREATEMACRO", null, null),
   DROPMACRO("DROPMACRO", null, null),
   CREATEVIEW("CREATEVIEW", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.CREATE}),
+  CREATE_MATERIALIZED_VIEW("CREATEVIEW", new Privilege[]{Privilege.SELECT},
+      new Privilege[]{Privilege.CREATE}),
   DROPVIEW("DROPVIEW", null, new Privilege[]{Privilege.DROP}),
   CREATEINDEX("CREATEINDEX", null, null),
   DROPINDEX("DROPINDEX", null, null),

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
index df153a2..9a868a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
@@ -39,15 +39,19 @@ public class LoadFileDesc extends LoadDesc implements Serializable {
   public LoadFileDesc() {
   }
 
-  public LoadFileDesc(final CreateTableDesc createTableDesc, final Path sourcePath,
-      final Path targetDir,
-      final boolean isDfsDir, final String columns, final String columnTypes) {
+  public LoadFileDesc(final CreateTableDesc createTableDesc, final CreateViewDesc createViewDesc,
+                      final Path sourcePath, final Path targetDir, final boolean isDfsDir,
+                      final String columns, final String columnTypes) {
     this(sourcePath, targetDir, isDfsDir, columns, columnTypes);
     if (createTableDesc != null && createTableDesc.getDatabaseName() != null
         && createTableDesc.getTableName() != null) {
       destinationCreateTable = (createTableDesc.getTableName().contains(".") ? "" : createTableDesc
           .getDatabaseName() + ".")
           + createTableDesc.getTableName();
+    } else if (createViewDesc != null) {
+      // The work is already done in analyzeCreateView to ensure that the view name is fully
+      // qualified.
+      destinationCreateTable = createViewDesc.getViewName();
     }
   }
 

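In the materialized-view branch no database prefix is prepended because, as
the comment notes, analyzeCreateView has already qualified the name; so for an
illustrative statement like the one below, destinationCreateTable always
arrives here in db.view form:

  -- whether written qualified or not, the analyzer fills in the database
  create materialized view target_db.mv1 as select a, c from base_table;
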
http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 5dc3aa6..b15ad34 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -380,6 +380,47 @@ public final class PlanUtils {
     return ret;
   }
 
+  /**
+   * Generate a table descriptor from a CreateViewDesc.
+   */
+  public static TableDesc getTableDesc(CreateViewDesc crtViewDesc, String cols, String colTypes) {
+    TableDesc ret;
+
+    try {
+      Class serdeClass = JavaUtils.loadClass(crtViewDesc.getSerde());
+      ret = getTableDesc(serdeClass, new String(LazySerDeParameters.DefaultSeparators), cols,
+          colTypes, false, false);
+
+      // set other table properties
+      /*
+      TODO - I don't think I need any of this
+      Properties properties = ret.getProperties();
+
+      if (crtTblDesc.getTableName() != null && crtTblDesc.getDatabaseName() != null) {
+        properties.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME,
+            crtTblDesc.getTableName());
+      }
+
+      if (crtTblDesc.getTblProps() != null) {
+        properties.putAll(crtTblDesc.getTblProps());
+      }
+       */
+
+      // replace the default input & output file format with those found in
+      // crtViewDesc
+      Class<? extends InputFormat> inClass =
+          (Class<? extends InputFormat>)JavaUtils.loadClass(crtViewDesc.getInputFormat());
+      Class<? extends HiveOutputFormat> outClass =
+          (Class<? extends HiveOutputFormat>)JavaUtils.loadClass(crtViewDesc.getOutputFormat());
+
+      ret.setInputFileFormatClass(inClass);
+      ret.setOutputFileFormatClass(outClass);
+    } catch (ClassNotFoundException e) {
+      throw new RuntimeException("Unable to find class in getTableDesc: " + e.getMessage(), e);
+    }
+    return ret;
+  }
+
   /**
    * Generate the table descriptor of MetadataTypedColumnsetSerDe with the
    * separatorCode. MetaDataTypedColumnsetSerDe is used because LazySimpleSerDe

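This mirrors the existing CreateTableDesc variant: the serde and file-format
classes recorded in the view descriptor drive the TableDesc used to write the
materialized data. Assuming the HiveParser.g extension in this patch accepts a
storage clause (a hedged reading of the grammar change), a statement along
these lines would populate those fields with non-default classes:

  create materialized view mv_orc stored as orc
    as select a, c from base_table;
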
http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
index c507f67..884b129 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
@@ -82,6 +82,7 @@ public enum HiveOperationType {
   CREATEMACRO,
   DROPMACRO,
   CREATEVIEW,
+  CREATE_MATERIALIZED_VIEW,
   DROPVIEW,
   CREATEINDEX,
   DROPINDEX,

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
index bbe28ab..ddf1e66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
@@ -332,6 +332,10 @@ public class Operation2Privilege {
         new PrivRequirement(SEL_GRANT_AR, IOType.INPUT),
         new PrivRequirement(OWNER_PRIV_AR, HivePrivilegeObjectType.DATABASE)));
 
+    op2Priv.put(HiveOperationType.CREATE_MATERIALIZED_VIEW, PrivRequirement.newPrivRequirementList(
+        new PrivRequirement(SEL_GRANT_AR, IOType.INPUT),
+        new PrivRequirement(OWNER_PRIV_AR, HivePrivilegeObjectType.DATABASE)));
+
     op2Priv.put(HiveOperationType.SHOWFUNCTIONS, PrivRequirement.newIOPrivRequirement
 (null, null));
     op2Priv.put(HiveOperationType.SHOWINDEXES, PrivRequirement.newIOPrivRequirement

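Under SQL standard authorization the new entry mirrors CREATEVIEW: creating a
materialized view requires SELECT WITH GRANT OPTION on every input plus
ownership of the target database, which is exactly what the negative tests
below probe. An illustrative grant flow that satisfies the requirement:

  -- as the owner of base_table (plain SELECT is not enough; see
  -- materialized_view_authorization_create_no_grant.q below):
  grant select on table base_table to user user2 with grant option;
  -- as user2, in a database user2 owns:
  create materialized view mv1 as select a, c from base_table;
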
http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_authorization_create_no_grant.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_authorization_create_no_grant.q b/ql/src/test/queries/clientnegative/materialized_view_authorization_create_no_grant.q
new file mode 100644
index 0000000..079baff
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_authorization_create_no_grant.q
@@ -0,0 +1,15 @@
+set hive.test.authz.sstd.hs2.mode=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+create table amvcng_gtable (a int, b varchar(256), c decimal(10,2));
+
+insert into amvcng_gtable values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+grant select on table amvcng_gtable to user user2;
+
+set user.name=user2;
+
+create materialized view amvcng_gmat_view as select a, c from amvcng_gtable;

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_authorization_create_no_select_perm.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_authorization_create_no_select_perm.q b/ql/src/test/queries/clientnegative/materialized_view_authorization_create_no_select_perm.q
new file mode 100644
index 0000000..4de525c
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_authorization_create_no_select_perm.q
@@ -0,0 +1,14 @@
+set hive.test.authz.sstd.hs2.mode=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+create table amvnsp_table (a int, b varchar(256), c decimal(10,2));
+
+insert into amvnsp_table values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+
+set user.name=user2;
+
+create materialized view amvnsp_mat_view as select a, c from amvnsp_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_authorization_drop_other.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_authorization_drop_other.q b/ql/src/test/queries/clientnegative/materialized_view_authorization_drop_other.q
new file mode 100644
index 0000000..c9a5930
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_authorization_drop_other.q
@@ -0,0 +1,14 @@
+set hive.test.authz.sstd.hs2.mode=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+create table amvdo_table (a int, b varchar(256), c decimal(10,2));
+
+insert into amvdo_table values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+create materialized view amvdo_mat_view as select a, c from amvdo_table;
+
+set user.name=user2;
+drop materialized view amvdo_mat_view;

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_authorization_no_select_perm.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_authorization_no_select_perm.q b/ql/src/test/queries/clientnegative/materialized_view_authorization_no_select_perm.q
new file mode 100644
index 0000000..8428152
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_authorization_no_select_perm.q
@@ -0,0 +1,14 @@
+set hive.test.authz.sstd.hs2.mode=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+create table amvnsp_table (a int, b varchar(256), c decimal(10,2));
+
+insert into amvnsp_table values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+create materialized view amvnsp_mat_view as select a, c from amvnsp_table;
+
+set user.name=user2;
+select * from amvnsp_mat_view;

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q b/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q
new file mode 100644
index 0000000..a2e7d38
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q
@@ -0,0 +1,20 @@
+set hive.test.authz.sstd.hs2.mode=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+create table amvrng_table (a int, b varchar(256), c decimal(10,2));
+
+insert into amvrng_table values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+grant select on table amvrng_table to user user2 with grant option;
+
+set user.name=user2;
+create materialized view amvrng_mat_view as select a, c from amvrng_table;
+
+set user.name=user1;
+revoke grant option for select on table amvrng_table from user user2;
+
+set user.name=user2;
+alter materialized view amvrng_mat_view rebuild;

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_other.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_other.q b/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_other.q
new file mode 100644
index 0000000..7c2d145
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_other.q
@@ -0,0 +1,14 @@
+set hive.test.authz.sstd.hs2.mode=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+create table amvro_table (a int, b varchar(256), c decimal(10,2));
+
+insert into amvro_table values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+create materialized view amvro_mat_view as select a, c from amvro_table;
+
+set user.name=user2;
+alter materialized view amvro_mat_view rebuild;

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_delete.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_delete.q b/ql/src/test/queries/clientnegative/materialized_view_delete.q
new file mode 100644
index 0000000..f557df9
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_delete.q
@@ -0,0 +1,10 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+create table dmv_basetable (a int, b varchar(256), c decimal(10,2));
+
+
+create materialized view dmv_mat_view as select a, b, c from dmv_basetable;
+
+delete from dmv_mat_view where b = 'fred';

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_insert.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_insert.q b/ql/src/test/queries/clientnegative/materialized_view_insert.q
new file mode 100644
index 0000000..2daae52
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_insert.q
@@ -0,0 +1,6 @@
+create table imv_basetable (a int, b varchar(256), c decimal(10,2));
+
+
+create materialized view imv_mat_view as select a, b, c from imv_basetable;
+
+insert into imv_mat_view values (1, 'fred', 3.14);

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_load.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_load.q b/ql/src/test/queries/clientnegative/materialized_view_load.q
new file mode 100644
index 0000000..a8d0dfd
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_load.q
@@ -0,0 +1,7 @@
+create table lmv_basetable (a int, b varchar(256), c decimal(10,2));
+
+
+create materialized view lmv_mat_view as select a, b, c from lmv_basetable;
+
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE lmv_mat_view;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_replace_with_view.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_replace_with_view.q b/ql/src/test/queries/clientnegative/materialized_view_replace_with_view.q
new file mode 100644
index 0000000..7c972f5
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_replace_with_view.q
@@ -0,0 +1,8 @@
+create table rmvwv_basetable (a int, b varchar(256), c decimal(10,2));
+
+insert into rmvwv_basetable values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8);
+
+create materialized view rmvwv_mat_view as select a, b, c from rmvwv_basetable;
+
+create or replace view rmvwv_mat_view as select a, c from rmvwv_basetable;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/materialized_view_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_update.q b/ql/src/test/queries/clientnegative/materialized_view_update.q
new file mode 100644
index 0000000..8245ef0
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/materialized_view_update.q
@@ -0,0 +1,10 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+create table umv_basetable (a int, b varchar(256), c decimal(10,2));
+
+
+create materialized view umv_mat_view as select a, b, c from umv_basetable;
+
+update umv_mat_view set b = 'joe' where b = 'fred';

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/view_delete.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/view_delete.q b/ql/src/test/queries/clientnegative/view_delete.q
new file mode 100644
index 0000000..c9dc985
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/view_delete.q
@@ -0,0 +1,10 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+create table dv_basetable (a int, b varchar(256), c decimal(10,2));
+
+
+create view dv_view as select a, b, c from dv_basetable;
+
+delete from dv_view where b = 'fred';

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientnegative/view_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/view_update.q b/ql/src/test/queries/clientnegative/view_update.q
new file mode 100644
index 0000000..3a54ccb
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/view_update.q
@@ -0,0 +1,10 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+create table uv_basetable (a int, b varchar(256), c decimal(10,2));
+
+
+create view uv_view as select a, b, c from uv_basetable;
+
+update uv_view set b = 'joe' where b = 'fred';

http://git-wip-us.apache.org/repos/asf/hive/blob/438109cb/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q b/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q
deleted file mode 100644
index 14044bf..0000000
--- a/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q
+++ /dev/null
@@ -1,86 +0,0 @@
-set hive.test.authz.sstd.hs2.mode=true;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
-set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
-set hive.security.authorization.enabled=true;
-set user.name=user1;
-
--- Test view authorization , and 'show grant' variants
-
-create table t1(i int, j int, k int);
-show grant user user1 on table t1;
-
--- protecting certain columns
-create view vt1 as select i,k from t1;
-
--- protecting certain rows
-create view vt2 as select * from t1 where i > 1;
-
-show grant user user1 on all;
-
---view grant to user
--- try with and without table keyword
-
-grant select on vt1 to user user2;
-grant insert on table vt1 to user user3;
-
-set user.name=user2;
-show grant user user2 on table vt1;
-set user.name=user3;
-show grant user user3 on table vt1;
-
-
-set user.name=user2;
-
-explain authorization select * from vt1;
-select * from vt1;
-
--- verify input objects required does not include table
--- even if view is within a sub query
-select * from (select * from vt1) a;
-
-select * from vt1 union all select * from vt1;
-
-set user.name=user1;
-
-grant all on table vt2 to user user2;
-
-set user.name=user2;
-show grant user user2 on table vt2;
-show grant user user2 on all;
-set user.name=user1;
-
-revoke all on vt2 from user user2;
-
-set user.name=user2;
-show grant user user2 on table vt2;
-
-
-set user.name=hive_admin_user;
-set role admin;
-show grant on table vt2;
-
-set user.name=user1;
-revoke select on table vt1 from user user2;
-
-set user.name=user2;
-show grant user user2 on table vt1;
-show grant user user2 on all;
-
-set user.name=user3;
--- grant privileges on roles for view, after next statement
-show grant user user3 on table vt1;
-
-set user.name=hive_admin_user;
-show current roles;
-set role ADMIN;
-create role role_v;
-grant  role_v to user user4 ;
-show role grant user user4;
-show roles;
-
-grant all on table vt2 to role role_v;
-show grant role role_v on table vt2;
-
-revoke delete on table vt2 from role role_v;
-show grant role role_v on table vt2;
-show grant on table vt2;