You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@impala.apache.org by ar...@apache.org on 2018/07/25 19:28:12 UTC

[01/10] impala git commit: IMPALA-6174: [DOCS] Fixed the seed data type for RAND and RANDOM functions

Repository: impala
Updated Branches:
  refs/heads/master ac4acf1b7 -> cec33fa0a


IMPALA-6174: [DOCS] Fixed the seed data type for RAND and RANDOM functions

Change-Id: If6393bd618a26148dd668b3323c32af263637e14
Reviewed-on: http://gerrit.cloudera.org:8080/11023
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/02389d4d
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/02389d4d
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/02389d4d

Branch: refs/heads/master
Commit: 02389d4dd225573eaace0263bc84ea7cddff3f47
Parents: ac4acf1
Author: Alex Rodoni <ar...@cloudera.com>
Authored: Mon Jul 23 15:37:18 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Tue Jul 24 18:36:59 2018 +0000

----------------------------------------------------------------------
 docs/topics/impala_math_functions.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/02389d4d/docs/topics/impala_math_functions.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_math_functions.xml b/docs/topics/impala_math_functions.xml
index 78147c8..9ac8d33 100644
--- a/docs/topics/impala_math_functions.xml
+++ b/docs/topics/impala_math_functions.xml
@@ -1147,9 +1147,9 @@ select pmod(5,-2);
       <dlentry id="rand">
 
         <dt>
-          <codeph>rand()</codeph>, <codeph>rand(int seed)</codeph>,
+          <codeph>rand()</codeph>, <codeph>rand(bigint seed)</codeph>,
           <codeph rev="2.3.0" id="random">random()</codeph>,
-          <codeph rev="2.3.0">random(int seed)</codeph>
+          <codeph rev="2.3.0">random(bigint seed)</codeph>
         </dt>
 
         <dd>


[08/10] impala git commit: IMPALA-7257. Support Kudu tables in LocalCatalog

Posted by ar...@apache.org.
IMPALA-7257. Support Kudu tables in LocalCatalog

This adds support for querying Kudu tables when --use_local_catalog is
enabled.

With this change, most of the kudu e2e tests pass. Those that don't are
related to missing support for CREATE TABLE AS SELECT, which currently
has some downcasts to 'Db' causing issues. That will be addressed in a
separate patch along with fixing CTAS for FS-backed tables.

Change-Id: I5b6a317ee895e43e00ade953e814867b56b4e6dd
Reviewed-on: http://gerrit.cloudera.org:8080/10912
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Reviewed-by: Tianyi Wang <tw...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/c333b552
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/c333b552
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/c333b552

Branch: refs/heads/master
Commit: c333b55260bb5183fbf8e112cc1b3864bd7b383b
Parents: def5c88
Author: Todd Lipcon <to...@cloudera.com>
Authored: Fri Jun 29 15:35:29 2018 -0700
Committer: Todd Lipcon <to...@apache.org>
Committed: Wed Jul 25 19:27:26 2018 +0000

----------------------------------------------------------------------
 .../AlterTableAddDropRangePartitionStmt.java    |   8 +-
 .../analysis/AlterTableAddPartitionStmt.java    |   3 +-
 .../analysis/AlterTableAddReplaceColsStmt.java  |   4 +-
 .../impala/analysis/AlterTableAlterColStmt.java |   6 +-
 .../analysis/AlterTableDropPartitionStmt.java   |   4 +-
 .../analysis/AlterTableSetFileFormatStmt.java   |   4 +-
 .../analysis/AlterTableSetLocationStmt.java     |   4 +-
 .../analysis/AlterTableSetTblProperties.java    |   5 +-
 .../impala/analysis/AlterTableSortByStmt.java   |   4 +-
 .../org/apache/impala/analysis/Analyzer.java    |   5 +-
 .../analysis/CreateTableAsSelectStmt.java       |   3 +-
 .../org/apache/impala/analysis/InsertStmt.java  |  31 ++-
 .../impala/analysis/KuduPartitionExpr.java      |   9 +-
 .../org/apache/impala/analysis/ModifyStmt.java  |  10 +-
 .../apache/impala/analysis/ShowStatsStmt.java   |   8 +-
 .../org/apache/impala/analysis/ToSqlUtils.java  |   5 +-
 .../apache/impala/analysis/TupleDescriptor.java |   4 +-
 .../org/apache/impala/catalog/FeKuduTable.java  | 221 +++++++++++++++++++
 .../org/apache/impala/catalog/KuduTable.java    | 188 ++--------------
 .../impala/catalog/local/LocalCatalog.java      |  11 +-
 .../impala/catalog/local/LocalFsTable.java      |  20 +-
 .../impala/catalog/local/LocalKuduTable.java    | 202 +++++++++++++++++
 .../apache/impala/catalog/local/LocalTable.java | 189 +++++++++-------
 .../apache/impala/catalog/local/LocalView.java  |   5 +-
 .../impala/planner/DistributedPlanner.java      |   8 +-
 .../org/apache/impala/planner/KuduScanNode.java |   6 +-
 .../java/org/apache/impala/planner/Planner.java |   6 +-
 .../impala/planner/SingleNodePlanner.java       |   4 +-
 .../org/apache/impala/planner/TableSink.java    |   4 +-
 .../org/apache/impala/service/Frontend.java     |  12 +-
 .../impala/service/KuduCatalogOpExecutor.java   |   4 +-
 .../java/org/apache/impala/util/KuduUtil.java   |   6 +-
 .../impala/catalog/local/LocalCatalogTest.java  |  37 +++-
 33 files changed, 697 insertions(+), 343 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
index 6c62df5..44c7091 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
@@ -20,8 +20,8 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableAddDropRangePartitionParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -88,12 +88,12 @@ public class AlterTableAddDropRangePartitionStmt extends AlterTableStmt {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
     FeTable table = getTargetTable();
-    if (!(table instanceof KuduTable)) {
+    if (!(table instanceof FeKuduTable)) {
       throw new AnalysisException(String.format("Table %s does not support range " +
           "partitions: RANGE %s", table.getFullName(), rangePartitionSpec_.toSql()));
     }
-    KuduTable kuduTable = (KuduTable) table;
-    List<String> colNames = kuduTable.getRangePartitioningColNames();
+    FeKuduTable kuduTable = (FeKuduTable) table;
+    List<String> colNames = FeKuduTable.Utils.getRangePartitioningColNames(kuduTable);
     if (colNames.isEmpty()) {
       throw new AnalysisException(String.format("Cannot add/drop partition %s: " +
           "Kudu table %s doesn't have a range-based partitioning.",

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
index 9c1a035..4b2356a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Sets;
 
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
@@ -84,7 +85,7 @@ public class AlterTableAddPartitionStmt extends AlterTableStmt {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
     FeTable table = getTargetTable();
-    if (table instanceof KuduTable) {
+    if (table instanceof FeKuduTable) {
       throw new AnalysisException("ALTER TABLE ADD PARTITION is not supported for " +
           "Kudu tables: " + table.getTableName());
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
index d00c0cd..6abac52 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
@@ -23,9 +23,9 @@ import java.util.Set;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -80,7 +80,7 @@ public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
           "supported on HBase tables.");
     }
 
-    boolean isKuduTable = t instanceof KuduTable;
+    boolean isKuduTable = t instanceof FeKuduTable;
     if (isKuduTable && replaceExistingCols_) {
       throw new AnalysisException("ALTER TABLE REPLACE COLUMNS is not " +
           "supported on Kudu tables.");

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
index e6b0d49..f7d8ce8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
@@ -20,10 +20,10 @@ package org.apache.impala.analysis;
 import java.util.Map;
 
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.KuduColumn;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableAlterColParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -134,7 +134,7 @@ public class AlterTableAlterColStmt extends AlterTableStmt {
     }
     if (newColDef_.hasKuduOptions()) {
       // Disallow Kudu options on non-Kudu tables.
-      if (!(t instanceof KuduTable)) {
+      if (!(t instanceof FeKuduTable)) {
         if (isDropDefault_) {
           throw new AnalysisException(String.format(
               "Unsupported column option for non-Kudu table: DROP DEFAULT"));
@@ -153,7 +153,7 @@ public class AlterTableAlterColStmt extends AlterTableStmt {
                 newColDef_.toString()));
       }
     }
-    if (t instanceof KuduTable) {
+    if (t instanceof FeKuduTable) {
       KuduColumn col = (KuduColumn) t.getColumn(colName_);
       if (!col.getType().equals(newColDef_.getType())) {
         throw new AnalysisException(String.format("Cannot change the type of a Kudu " +

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
index 048b985..707f3de 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
@@ -18,8 +18,8 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableDropPartitionParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -75,7 +75,7 @@ public class AlterTableDropPartitionStmt extends AlterTableStmt {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
     FeTable table = getTargetTable();
-    if (table instanceof KuduTable) {
+    if (table instanceof FeKuduTable) {
       throw new AnalysisException("ALTER TABLE DROP PARTITION is not supported for " +
           "Kudu tables: " + partitionSet_.toSql());
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
index 31c046a..b88216c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
@@ -17,8 +17,8 @@
 
 package org.apache.impala.analysis;
 
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetFileFormatParams;
@@ -56,7 +56,7 @@ public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
     FeTable tbl = getTargetTable();
-    if (tbl instanceof KuduTable) {
+    if (tbl instanceof FeKuduTable) {
       throw new AnalysisException("ALTER TABLE SET FILEFORMAT is not supported " +
           "on Kudu tables: " + tbl.getFullName());
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
index db8a804..b034ab5 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -23,10 +23,10 @@ import java.util.List;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetLocationParams;
@@ -111,7 +111,7 @@ public class AlterTableSetLocationStmt extends AlterTableSetStmt {
             "uncache before changing the location using: ALTER TABLE %s SET UNCACHED",
             table.getFullName()));
       }
-    } else if (table instanceof KuduTable) {
+    } else if (table instanceof FeKuduTable) {
       throw new AnalysisException("ALTER TABLE SET LOCATION is not supported on Kudu " +
           "tables: " + table.getFullName());
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
index 26c5ac0..1f02390 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
 import org.apache.impala.authorization.PrivilegeRequestBuilder;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsTable;
@@ -93,7 +94,7 @@ public class AlterTableSetTblProperties extends AlterTableSetStmt {
           hive_metastoreConstants.META_TABLE_STORAGE));
     }
 
-    if (getTargetTable() instanceof KuduTable) analyzeKuduTable(analyzer);
+    if (getTargetTable() instanceof FeKuduTable) analyzeKuduTable(analyzer);
 
     // Check avro schema when it is set in avro.schema.url or avro.schema.literal to
     // avoid potential metadata corruption (see IMPALA-2042).
@@ -211,7 +212,7 @@ public class AlterTableSetTblProperties extends AlterTableSetStmt {
     // AlterTableSetStmt::analyze().
     Preconditions.checkState(!(table instanceof HBaseTable));
 
-    if (table instanceof KuduTable) {
+    if (table instanceof FeKuduTable) {
       throw new AnalysisException(String.format("'%s' table property is not supported " +
           "for Kudu tables.", AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS));
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java
index 2b54b74..0f99c9b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java
@@ -20,9 +20,9 @@ package org.apache.impala.analysis;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetTblPropertiesParams;
@@ -71,7 +71,7 @@ public class AlterTableSortByStmt extends AlterTableStmt {
     if (targetTable instanceof HBaseTable) {
       throw new AnalysisException("ALTER TABLE SORT BY not supported on HBase tables.");
     }
-    if (targetTable instanceof KuduTable) {
+    if (targetTable instanceof FeKuduTable) {
       throw new AnalysisException("ALTER TABLE SORT BY not supported on Kudu tables.");
     }
 

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index 1a4ea13..e902383 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -38,17 +38,16 @@ import org.apache.impala.authorization.PrivilegeRequest;
 import org.apache.impala.authorization.PrivilegeRequestBuilder;
 import org.apache.impala.authorization.User;
 import org.apache.impala.catalog.Column;
-import org.apache.impala.catalog.DataSourceTable;
 import org.apache.impala.catalog.DatabaseNotFoundException;
 import org.apache.impala.catalog.FeCatalog;
 import org.apache.impala.catalog.FeDataSourceTable;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.IncompleteTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
@@ -589,7 +588,7 @@ public class Analyzer {
       if (table instanceof FeView) return new InlineViewRef((FeView) table, tableRef);
       // The table must be a base table.
       Preconditions.checkState(table instanceof FeFsTable ||
-          table instanceof KuduTable ||
+          table instanceof FeKuduTable ||
           table instanceof HBaseTable ||
           table instanceof FeDataSourceTable);
       return new BaseTableRef(tableRef, resolvedPath);

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
index 76b63c9..33a271b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
@@ -25,6 +25,7 @@ import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Db;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsTable;
@@ -221,7 +222,7 @@ public class CreateTableAsSelectStmt extends StatementBase {
         tmpTable = HdfsTable.createCtasTarget((Db)db, msTbl);
       }
       Preconditions.checkState(tmpTable != null &&
-          (tmpTable instanceof FeFsTable || tmpTable instanceof KuduTable));
+          (tmpTable instanceof FeFsTable || tmpTable instanceof FeKuduTable));
 
       insertStmt_.setTargetTable(tmpTable);
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
index 32efc2c..72ec828 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
@@ -19,18 +19,19 @@ package org.apache.impala.analysis;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequestBuilder;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduColumn;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
@@ -440,7 +441,7 @@ public class InsertStmt extends StatementBase {
 
     // Perform operation-specific analysis.
     if (isUpsert_) {
-      if (!(table_ instanceof KuduTable)) {
+      if (!(table_ instanceof FeKuduTable)) {
         throw new AnalysisException("UPSERT is only supported for Kudu tables");
       }
     } else {
@@ -507,7 +508,7 @@ public class InsertStmt extends StatementBase {
       }
     }
 
-    if (table_ instanceof KuduTable) {
+    if (table_ instanceof FeKuduTable) {
       if (overwrite_) {
         throw new AnalysisException("INSERT OVERWRITE not supported for Kudu tables.");
       }
@@ -536,7 +537,7 @@ public class InsertStmt extends StatementBase {
       // We've already ruled out too many columns in the permutation and partition clauses
       // by checking that there are no duplicates and that every column mentioned actually
       // exists. So all columns aren't mentioned in the query.
-      if (table_ instanceof KuduTable) {
+      if (table_ instanceof FeKuduTable) {
         checkRequiredKuduColumns(mentionedColumnNames);
       } else if (table_ instanceof HBaseTable) {
         checkRequiredHBaseColumns(mentionedColumnNames);
@@ -578,8 +579,8 @@ public class InsertStmt extends StatementBase {
    */
   private void checkRequiredKuduColumns(Set<String> mentionedColumnNames)
       throws AnalysisException {
-    Preconditions.checkState(table_ instanceof KuduTable);
-    List<String> keyColumns = ((KuduTable) table_).getPrimaryKeyColumnNames();
+    Preconditions.checkState(table_ instanceof FeKuduTable);
+    List<String> keyColumns = ((FeKuduTable) table_).getPrimaryKeyColumnNames();
     List<String> missingKeyColumnNames = Lists.newArrayList();
     for (Column column : table_.getColumns()) {
       if (!mentionedColumnNames.contains(column.getName())
@@ -660,10 +661,10 @@ public class InsertStmt extends StatementBase {
     List<String> tmpPartitionKeyNames = new ArrayList<String>();
 
     int numClusteringCols = (tbl instanceof HBaseTable) ? 0 : tbl.getNumClusteringCols();
-    boolean isKuduTable = table_ instanceof KuduTable;
+    boolean isKuduTable = table_ instanceof FeKuduTable;
     Set<String> kuduPartitionColumnNames = null;
     if (isKuduTable) {
-      kuduPartitionColumnNames = ((KuduTable) table_).getPartitionColumnNames();
+      kuduPartitionColumnNames = getKuduPartitionColumnNames((FeKuduTable) table_);
     }
 
     // Check dynamic partition columns for type compatibility.
@@ -759,14 +760,14 @@ public class InsertStmt extends StatementBase {
       }
       // Store exprs for Kudu key columns.
       if (matchFound && isKuduTable) {
-        KuduTable kuduTable = (KuduTable) table_;
-        if (kuduTable.isPrimaryKeyColumn(tblColumn.getName())) {
+        FeKuduTable kuduTable = (FeKuduTable) table_;
+        if (kuduTable.getPrimaryKeyColumnNames().contains(tblColumn.getName())) {
           primaryKeyExprs_.add(Iterables.getLast(resultExprs_));
         }
       }
     }
 
-    if (table_ instanceof KuduTable) {
+    if (table_ instanceof FeKuduTable) {
       Preconditions.checkState(!primaryKeyExprs_.isEmpty());
     }
 
@@ -783,6 +784,14 @@ public class InsertStmt extends StatementBase {
     }
   }
 
+  private static Set<String> getKuduPartitionColumnNames(FeKuduTable table) {
+    Set<String> ret = new HashSet<String>();
+    for (KuduPartitionParam partitionParam : table.getPartitionBy()) {
+      ret.addAll(partitionParam.getColumnNames());
+    }
+    return ret;
+  }
+
   /**
    * Analyzes the 'sort.columns' table property if it is set, and populates
    * sortColumns_ and sortExprs_. If there are errors during the analysis, this will throw

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/KuduPartitionExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/KuduPartitionExpr.java b/fe/src/main/java/org/apache/impala/analysis/KuduPartitionExpr.java
index 08c53fd..e888726 100644
--- a/fe/src/main/java/org/apache/impala/analysis/KuduPartitionExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/KuduPartitionExpr.java
@@ -18,9 +18,8 @@
 package org.apache.impala.analysis;
 
 import java.util.List;
-import java.util.Set;
 
-import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TExprNode;
@@ -30,8 +29,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
 
 /**
  * Internal expr that calls into the Kudu client to determine the partition index for
@@ -43,12 +40,12 @@ public class KuduPartitionExpr extends Expr {
 
   // The table to use the partitioning scheme from.
   private final int targetTableId_;
-  private final KuduTable targetTable_;
+  private final FeKuduTable targetTable_;
  // Maps from this Expr's children to column positions in the table, i.e. children_[i]
   // produces the value for column partitionColPos_[i].
   private List<Integer> partitionColPos_;
 
-  public KuduPartitionExpr(int targetTableId, KuduTable targetTable,
+  public KuduPartitionExpr(int targetTableId, FeKuduTable targetTable,
       List<Expr> partitionKeyExprs, List<Integer> partitionKeyIdxs) {
     Preconditions.checkState(partitionKeyExprs.size() == partitionKeyIdxs.size());
     targetTableId_ = targetTableId;

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java b/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
index 29266f6..9aa0964 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
@@ -26,8 +26,8 @@ import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.Pair;
@@ -79,7 +79,7 @@ public abstract class ModifyStmt extends StatementBase {
 
   // Target Kudu table. Since currently only Kudu tables are supported, we use a
   // concrete table class. Result of analysis.
-  protected KuduTable table_;
+  protected FeKuduTable table_;
 
   // END: Members that need to be reset()
   /////////////////////////////////////////
@@ -153,12 +153,12 @@ public abstract class ModifyStmt extends StatementBase {
     Preconditions.checkNotNull(targetTableRef_);
     FeTable dstTbl = targetTableRef_.getTable();
     // Only Kudu tables can be updated
-    if (!(dstTbl instanceof KuduTable)) {
+    if (!(dstTbl instanceof FeKuduTable)) {
       throw new AnalysisException(
           format("Impala does not support modifying a non-Kudu table: %s",
               dstTbl.getFullName()));
     }
-    table_ = (KuduTable) dstTbl;
+    table_ = (FeKuduTable) dstTbl;
 
     // Make sure that the user is allowed to modify the target table. Use ALL because no
     // UPDATE / DELETE privilege exists yet (IMPALA-3840).
@@ -231,7 +231,7 @@ public abstract class ModifyStmt extends StatementBase {
     HashSet<SlotId> keySlots = Sets.newHashSet();
 
     // Mapping from column name to index
-    ArrayList<Column> cols = table_.getColumnsInHiveOrder();
+    List<Column> cols = table_.getColumnsInHiveOrder();
     HashMap<String, Integer> colIndexMap = Maps.newHashMap();
     for (int i = 0; i < cols.size(); i++) {
       colIndexMap.put(cols.get(i).getName(), i);

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
index 5a7b3bc..04d8703 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
@@ -21,9 +21,9 @@ import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TShowStatsOp;
 import org.apache.impala.thrift.TShowStatsParams;
@@ -87,10 +87,10 @@ public class ShowStatsStmt extends StatementBase {
         throw new AnalysisException(getSqlPrefix() + " must target a Kudu table: " +
             table_.getFullName());
       }
-    } else if (table_ instanceof KuduTable) {
-      KuduTable kuduTable = (KuduTable) table_;
+    } else if (table_ instanceof FeKuduTable) {
+      FeKuduTable kuduTable = (FeKuduTable) table_;
       if (op_ == TShowStatsOp.RANGE_PARTITIONS &&
-          kuduTable.getRangePartitioningColNames().isEmpty()) {
+          FeKuduTable.Utils.getRangePartitioningColNames(kuduTable).isEmpty()) {
         throw new AnalysisException(getSqlPrefix() + " requested but table does not " +
             "have range partitions: " + table_.getFullName());
       }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index cdd67ce..a669caf 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.ql.parse.HiveLexer;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.Function;
@@ -256,8 +257,8 @@ public class ToSqlUtils {
     String storageHandlerClassName = table.getStorageHandlerClassName();
     List<String> primaryKeySql = Lists.newArrayList();
     String kuduPartitionByParams = null;
-    if (table instanceof KuduTable) {
-      KuduTable kuduTable = (KuduTable) table;
+    if (table instanceof FeKuduTable) {
+      FeKuduTable kuduTable = (FeKuduTable) table;
       // Kudu tables don't use LOCATION syntax
       location = null;
       format = HdfsFileFormat.KUDU;

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
index f103d8f..22693ad 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
@@ -26,9 +26,9 @@ import java.util.Map;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.StructType;
 import org.apache.impala.thrift.TTupleDescriptor;
 
@@ -323,7 +323,7 @@ public class TupleDescriptor {
    * Returns true if this tuple has at least one materialized nullable Kudu scan slot.
    */
   private boolean hasNullableKuduScanSlots() {
-    if (!(getTable() instanceof KuduTable)) return false;
+    if (!(getTable() instanceof FeKuduTable)) return false;
     for (SlotDescriptor d: slots_) {
       if (d.isMaterialized() && d.getIsNullable()) return true;
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java b/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java
new file mode 100644
index 0000000..dc5f45d
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FeKuduTable.java
@@ -0,0 +1,221 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.catalog;
+
+import java.util.Collections;
+import java.util.List;
+
+import javax.xml.bind.DatatypeConverter;
+
+import org.apache.impala.analysis.KuduPartitionParam;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.service.BackendConfig;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.util.KuduUtil;
+import org.apache.impala.util.TResultRowBuilder;
+import org.apache.kudu.ColumnSchema;
+import org.apache.kudu.Schema;
+import org.apache.kudu.client.KuduClient;
+import org.apache.kudu.client.LocatedTablet;
+import org.apache.kudu.client.PartitionSchema;
+import org.apache.kudu.client.PartitionSchema.HashBucketSchema;
+import org.apache.kudu.client.PartitionSchema.RangeSchema;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Frontend interface for interacting with a Kudu-backed table.
+ */
+public interface FeKuduTable extends FeTable {
+  /**
+   * Return the comma-separated list of masters for the Kudu cluster
+   * backing this table.
+   */
+  String getKuduMasterHosts();
+
+  /**
+   * Return the name of the Kudu table backing this table.
+   */
+  String getKuduTableName();
+
+  /**
+   * Return the names of the columns that make up the primary key
+   * of this table.
+   */
+  List<String> getPrimaryKeyColumnNames();
+
+  /**
+   * Return the Kudu partitioning clause information.
+   */
+  List<KuduPartitionParam> getPartitionBy();
+
+  /**
+   * Utility functions for acting on FeKuduTable.
+   *
+   * When we fully move to Java 8, these can become default methods of the
+   * interface.
+   */
+  public static abstract class Utils {
+    /**
+     * Returns the range-based partitioning of the given table if it exists,
+     * null otherwise.
+     */
+    private static KuduPartitionParam getRangePartitioning(FeKuduTable table) {
+      for (KuduPartitionParam partitionParam: table.getPartitionBy()) {
+        if (partitionParam.getType() == KuduPartitionParam.Type.RANGE) {
+          return partitionParam;
+        }
+      }
+      return null;
+    }
+
+    /**
+     * Returns the column names of the table's range-based partitioning or an empty
+     * list if the table doesn't have a range-based partitioning.
+     */
+    public static List<String> getRangePartitioningColNames(FeKuduTable table) {
+      KuduPartitionParam rangePartitioning = getRangePartitioning(table);
+      if (rangePartitioning == null) return Collections.<String>emptyList();
+      return rangePartitioning.getColumnNames();
+    }
+
+    public static List<KuduPartitionParam> loadPartitionByParams(
+        org.apache.kudu.client.KuduTable kuduTable) {
+      List<KuduPartitionParam> ret = Lists.newArrayList();
+
+      Preconditions.checkNotNull(kuduTable);
+      Schema tableSchema = kuduTable.getSchema();
+      PartitionSchema partitionSchema = kuduTable.getPartitionSchema();
+      for (HashBucketSchema hashBucketSchema: partitionSchema.getHashBucketSchemas()) {
+        List<String> columnNames = Lists.newArrayList();
+        for (int colId: hashBucketSchema.getColumnIds()) {
+          columnNames.add(getColumnNameById(tableSchema, colId));
+        }
+        ret.add(KuduPartitionParam.createHashParam(columnNames,
+            hashBucketSchema.getNumBuckets()));
+      }
+      RangeSchema rangeSchema = partitionSchema.getRangeSchema();
+      List<Integer> columnIds = rangeSchema.getColumns();
+      if (columnIds.isEmpty()) return ret;
+      List<String> columnNames = Lists.newArrayList();
+      for (int colId: columnIds) columnNames.add(getColumnNameById(tableSchema, colId));
+      // We don't populate the split values because Kudu's API doesn't currently support
+      // retrieving the split values for range partitions.
+      // TODO: File a Kudu JIRA.
+      ret.add(KuduPartitionParam.createRangeParam(columnNames, null));
+
+      return ret;
+    }
+
+    public static TResultSet getTableStats(FeKuduTable table)
+        throws ImpalaRuntimeException {
+      TResultSet result = new TResultSet();
+      TResultSetMetadata resultSchema = new TResultSetMetadata();
+      result.setSchema(resultSchema);
+
+      resultSchema.addToColumns(new TColumn("# Rows", Type.INT.toThrift()));
+      resultSchema.addToColumns(new TColumn("Start Key", Type.STRING.toThrift()));
+      resultSchema.addToColumns(new TColumn("Stop Key", Type.STRING.toThrift()));
+      resultSchema.addToColumns(new TColumn("Leader Replica", Type.STRING.toThrift()));
+      resultSchema.addToColumns(new TColumn("# Replicas", Type.INT.toThrift()));
+
+      KuduClient client = KuduUtil.getKuduClient(table.getKuduMasterHosts());
+      try {
+        org.apache.kudu.client.KuduTable kuduTable = client.openTable(
+            table.getKuduTableName());
+        List<LocatedTablet> tablets =
+            kuduTable.getTabletsLocations(BackendConfig.INSTANCE.getKuduClientTimeoutMs());
+        if (tablets.isEmpty()) {
+          TResultRowBuilder builder = new TResultRowBuilder();
+          result.addToRows(
+              builder.add("-1").add("N/A").add("N/A").add("N/A").add("-1").get());
+          return result;
+        }
+        for (LocatedTablet tab: tablets) {
+          TResultRowBuilder builder = new TResultRowBuilder();
+          builder.add("-1");   // The Kudu client API doesn't expose tablet row counts.
+          builder.add(DatatypeConverter.printHexBinary(
+              tab.getPartition().getPartitionKeyStart()));
+          builder.add(DatatypeConverter.printHexBinary(
+              tab.getPartition().getPartitionKeyEnd()));
+          LocatedTablet.Replica leader = tab.getLeaderReplica();
+          if (leader == null) {
+            // Leader might be null, if it is not yet available (e.g. during
+            // leader election in Kudu)
+            builder.add("Leader n/a");
+          } else {
+            builder.add(leader.getRpcHost() + ":" + leader.getRpcPort().toString());
+          }
+          builder.add(tab.getReplicas().size());
+          result.addToRows(builder.get());
+        }
+
+      } catch (Exception e) {
+        throw new ImpalaRuntimeException("Error accessing Kudu for table stats.", e);
+      }
+      return result;
+    }
+
+    public static TResultSet getRangePartitions(FeKuduTable table)
+        throws ImpalaRuntimeException {
+      TResultSet result = new TResultSet();
+      TResultSetMetadata resultSchema = new TResultSetMetadata();
+      result.setSchema(resultSchema);
+
+      // Build column header
+      String header = "RANGE (" + Joiner.on(',').join(
+          Utils.getRangePartitioningColNames(table)) + ")";
+      resultSchema.addToColumns(new TColumn(header, Type.STRING.toThrift()));
+      KuduClient client = KuduUtil.getKuduClient(table.getKuduMasterHosts());
+      try {
+        org.apache.kudu.client.KuduTable kuduTable = client.openTable(
+            table.getKuduTableName());
+        // The Kudu table API will return the partitions in sorted order by value.
+        List<String> partitions = kuduTable.getFormattedRangePartitions(
+            BackendConfig.INSTANCE.getKuduClientTimeoutMs());
+        if (partitions.isEmpty()) {
+          TResultRowBuilder builder = new TResultRowBuilder();
+          result.addToRows(builder.add("").get());
+          return result;
+        }
+        for (String partition: partitions) {
+          TResultRowBuilder builder = new TResultRowBuilder();
+          builder.add(partition);
+          result.addToRows(builder.get());
+        }
+      } catch (Exception e) {
+        throw new ImpalaRuntimeException("Error accessing Kudu for table partitions.", e);
+      }
+      return result;
+    }
+
+    /**
+     * Returns the name of a Kudu column with id 'colId'.
+     */
+    private static String getColumnNameById(Schema tableSchema, int colId) {
+      Preconditions.checkNotNull(tableSchema);
+      ColumnSchema col = tableSchema.getColumnByIndex(tableSchema.getColumnIndex(colId));
+      Preconditions.checkNotNull(col);
+      return col.getName();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
index 5d6a10d..e277d3c 100644
--- a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
@@ -18,13 +18,9 @@
 package org.apache.impala.catalog;
 
 import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import javax.xml.bind.DatatypeConverter;
-
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -32,30 +28,18 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.impala.analysis.ColumnDef;
 import org.apache.impala.analysis.KuduPartitionParam;
 import org.apache.impala.common.ImpalaRuntimeException;
-import org.apache.impala.service.BackendConfig;
-import org.apache.impala.service.CatalogOpExecutor;
 import org.apache.impala.thrift.TCatalogObjectType;
-import org.apache.impala.thrift.TColumn;
 import org.apache.impala.thrift.TKuduPartitionByHashParam;
 import org.apache.impala.thrift.TKuduPartitionByRangeParam;
 import org.apache.impala.thrift.TKuduPartitionParam;
 import org.apache.impala.thrift.TKuduTable;
-import org.apache.impala.thrift.TResultSet;
-import org.apache.impala.thrift.TResultSetMetadata;
 import org.apache.impala.thrift.TTable;
 import org.apache.impala.thrift.TTableDescriptor;
 import org.apache.impala.thrift.TTableType;
 import org.apache.impala.util.KuduUtil;
-import org.apache.impala.util.TResultRowBuilder;
 import org.apache.kudu.ColumnSchema;
-import org.apache.kudu.Schema;
 import org.apache.kudu.client.KuduClient;
 import org.apache.kudu.client.KuduException;
-import org.apache.kudu.client.LocatedTablet;
-import org.apache.kudu.client.PartitionSchema;
-import org.apache.kudu.client.PartitionSchema.HashBucketSchema;
-import org.apache.kudu.client.PartitionSchema.RangeSchema;
-import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 
 import com.codahale.metrics.Timer;
@@ -67,7 +51,7 @@ import com.google.common.collect.Lists;
 /**
  * Representation of a Kudu table in the catalog cache.
  */
-public class KuduTable extends Table {
+public class KuduTable extends Table implements FeKuduTable {
 
   // Alias to the string key that identifies the storage handler for Kudu tables.
   public static final String KEY_STORAGE_HANDLER =
@@ -112,7 +96,7 @@ public class KuduTable extends Table {
 
   // Partitioning schemes of this Kudu table. Both range and hash-based partitioning are
   // supported.
-  private final List<KuduPartitionParam> partitionBy_ = Lists.newArrayList();
+  private List<KuduPartitionParam> partitionBy_;
 
   // Schema of the underlying Kudu table.
   private org.apache.kudu.Schema kuduSchema_;
@@ -140,48 +124,24 @@ public class KuduTable extends Table {
     return KUDU_STORAGE_HANDLER.equals(msTbl.getParameters().get(KEY_STORAGE_HANDLER));
   }
 
+  @Override
   public String getKuduTableName() { return kuduTableName_; }
+  @Override
   public String getKuduMasterHosts() { return kuduMasters_; }
+
   public org.apache.kudu.Schema getKuduSchema() { return kuduSchema_; }
 
+  @Override
   public List<String> getPrimaryKeyColumnNames() {
     return ImmutableList.copyOf(primaryKeyColumnNames_);
   }
 
+  @Override
   public List<KuduPartitionParam> getPartitionBy() {
+    Preconditions.checkState(partitionBy_ != null);
     return ImmutableList.copyOf(partitionBy_);
   }
 
-  public Set<String> getPartitionColumnNames() {
-    Set<String> ret = new HashSet<String>();
-    for (KuduPartitionParam partitionParam : partitionBy_) {
-      ret.addAll(partitionParam.getColumnNames());
-    }
-    return ret;
-  }
-
-  /**
-   * Returns the range-based partitioning of this table if it exists, null otherwise.
-   */
-  private KuduPartitionParam getRangePartitioning() {
-    for (KuduPartitionParam partitionParam: partitionBy_) {
-      if (partitionParam.getType() == KuduPartitionParam.Type.RANGE) {
-        return partitionParam;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Returns the column names of the table's range-based partitioning or an empty
-   * list if the table doesn't have a range-based partitioning.
-   */
-  public List<String> getRangePartitioningColNames() {
-    KuduPartitionParam rangePartitioning = getRangePartitioning();
-    if (rangePartitioning == null) return Collections.<String>emptyList();
-    return rangePartitioning.getColumnNames();
-  }
-
   /**
    * Load schema and partitioning schemes directly from Kudu.
    */
@@ -203,7 +163,8 @@ public class KuduTable extends Table {
     Preconditions.checkNotNull(kuduTable);
 
     loadSchema(kuduTable);
-    loadPartitionByParams(kuduTable);
+    Preconditions.checkState(!colsByPos_.isEmpty());
+    partitionBy_ = Utils.loadPartitionByParams(kuduTable);
   }
 
   /**
@@ -282,40 +243,6 @@ public class KuduTable extends Table {
     }
   }
 
-  private void loadPartitionByParams(org.apache.kudu.client.KuduTable kuduTable) {
-    Preconditions.checkNotNull(kuduTable);
-    Schema tableSchema = kuduTable.getSchema();
-    PartitionSchema partitionSchema = kuduTable.getPartitionSchema();
-    Preconditions.checkState(!colsByPos_.isEmpty());
-    partitionBy_.clear();
-    for (HashBucketSchema hashBucketSchema: partitionSchema.getHashBucketSchemas()) {
-      List<String> columnNames = Lists.newArrayList();
-      for (int colId: hashBucketSchema.getColumnIds()) {
-        columnNames.add(getColumnNameById(tableSchema, colId));
-      }
-      partitionBy_.add(KuduPartitionParam.createHashParam(columnNames,
-          hashBucketSchema.getNumBuckets()));
-    }
-    RangeSchema rangeSchema = partitionSchema.getRangeSchema();
-    List<Integer> columnIds = rangeSchema.getColumns();
-    if (columnIds.isEmpty()) return;
-    List<String> columnNames = Lists.newArrayList();
-    for (int colId: columnIds) columnNames.add(getColumnNameById(tableSchema, colId));
-    // We don't populate the split values because Kudu's API doesn't currently support
-    // retrieving the split values for range partitions.
-    // TODO: File a Kudu JIRA.
-    partitionBy_.add(KuduPartitionParam.createRangeParam(columnNames, null));
-  }
-
-  /**
-   * Returns the name of a Kudu column with id 'colId'.
-   */
-  private String getColumnNameById(Schema tableSchema, int colId) {
-    Preconditions.checkNotNull(tableSchema);
-    ColumnSchema col = tableSchema.getColumnByIndex(tableSchema.getColumnIndex(colId));
-    Preconditions.checkNotNull(col);
-    return col.getName();
-  }
 
   /**
    * Creates a temporary KuduTable object populated with the specified properties but has
@@ -333,7 +260,7 @@ public class KuduTable extends Table {
     for (ColumnDef pkColDef: primaryKeyColumnDefs) {
       tmpTable.primaryKeyColumnNames_.add(pkColDef.getColName());
     }
-    tmpTable.partitionBy_.addAll(partitionParams);
+    tmpTable.partitionBy_ = ImmutableList.copyOf(partitionParams);
     return tmpTable;
   }
 
@@ -353,23 +280,25 @@ public class KuduTable extends Table {
     kuduMasters_ = Joiner.on(',').join(tkudu.getMaster_addresses());
     primaryKeyColumnNames_.clear();
     primaryKeyColumnNames_.addAll(tkudu.getKey_columns());
-    loadPartitionByParamsFromThrift(tkudu.getPartition_by());
+    partitionBy_ = loadPartitionByParamsFromThrift(tkudu.getPartition_by());
   }
 
-  private void loadPartitionByParamsFromThrift(List<TKuduPartitionParam> params) {
-    partitionBy_.clear();
+  private static List<KuduPartitionParam> loadPartitionByParamsFromThrift(
+      List<TKuduPartitionParam> params) {
+    List<KuduPartitionParam> ret= Lists.newArrayList();
     for (TKuduPartitionParam param: params) {
       if (param.isSetBy_hash_param()) {
         TKuduPartitionByHashParam hashParam = param.getBy_hash_param();
-        partitionBy_.add(KuduPartitionParam.createHashParam(
+        ret.add(KuduPartitionParam.createHashParam(
             hashParam.getColumns(), hashParam.getNum_partitions()));
       } else {
         Preconditions.checkState(param.isSetBy_range_param());
         TKuduPartitionByRangeParam rangeParam = param.getBy_range_param();
-        partitionBy_.add(KuduPartitionParam.createRangeParam(rangeParam.getColumns(),
+        ret.add(KuduPartitionParam.createRangeParam(rangeParam.getColumns(),
             null));
       }
     }
+    return ret;
   }
 
   @Override
@@ -395,85 +324,4 @@ public class KuduTable extends Table {
     }
     return tbl;
   }
-
-  public boolean isPrimaryKeyColumn(String name) {
-    return primaryKeyColumnNames_.contains(name);
-  }
-
-  public TResultSet getTableStats() throws ImpalaRuntimeException {
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    result.setSchema(resultSchema);
-
-    resultSchema.addToColumns(new TColumn("# Rows", Type.INT.toThrift()));
-    resultSchema.addToColumns(new TColumn("Start Key", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Stop Key", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Leader Replica", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("# Replicas", Type.INT.toThrift()));
-
-    KuduClient client = KuduUtil.getKuduClient(getKuduMasterHosts());
-    try {
-      org.apache.kudu.client.KuduTable kuduTable = client.openTable(kuduTableName_);
-      List<LocatedTablet> tablets =
-          kuduTable.getTabletsLocations(BackendConfig.INSTANCE.getKuduClientTimeoutMs());
-      if (tablets.isEmpty()) {
-        TResultRowBuilder builder = new TResultRowBuilder();
-        result.addToRows(
-            builder.add("-1").add("N/A").add("N/A").add("N/A").add("-1").get());
-        return result;
-      }
-      for (LocatedTablet tab: tablets) {
-        TResultRowBuilder builder = new TResultRowBuilder();
-        builder.add("-1");   // The Kudu client API doesn't expose tablet row counts.
-        builder.add(DatatypeConverter.printHexBinary(
-            tab.getPartition().getPartitionKeyStart()));
-        builder.add(DatatypeConverter.printHexBinary(
-            tab.getPartition().getPartitionKeyEnd()));
-        LocatedTablet.Replica leader = tab.getLeaderReplica();
-        if (leader == null) {
-          // Leader might be null, if it is not yet available (e.g. during
-          // leader election in Kudu)
-          builder.add("Leader n/a");
-        } else {
-          builder.add(leader.getRpcHost() + ":" + leader.getRpcPort().toString());
-        }
-        builder.add(tab.getReplicas().size());
-        result.addToRows(builder.get());
-      }
-
-    } catch (Exception e) {
-      throw new ImpalaRuntimeException("Error accessing Kudu for table stats.", e);
-    }
-    return result;
-  }
-
-  public TResultSet getRangePartitions() throws ImpalaRuntimeException {
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    result.setSchema(resultSchema);
-
-    // Build column header
-    String header = "RANGE (" + Joiner.on(',').join(getRangePartitioningColNames()) + ")";
-    resultSchema.addToColumns(new TColumn(header, Type.STRING.toThrift()));
-    KuduClient client = KuduUtil.getKuduClient(getKuduMasterHosts());
-    try {
-      org.apache.kudu.client.KuduTable kuduTable = client.openTable(kuduTableName_);
-      // The Kudu table API will return the partitions in sorted order by value.
-      List<String> partitions = kuduTable.getFormattedRangePartitions(
-          BackendConfig.INSTANCE.getKuduClientTimeoutMs());
-      if (partitions.isEmpty()) {
-        TResultRowBuilder builder = new TResultRowBuilder();
-        result.addToRows(builder.add("").get());
-        return result;
-      }
-      for (String partition: partitions) {
-        TResultRowBuilder builder = new TResultRowBuilder();
-        builder.add(partition);
-        result.addToRows(builder.get());
-      }
-    } catch (Exception e) {
-      throw new ImpalaRuntimeException("Error accessing Kudu for table partitions.", e);
-    }
-    return result;
-  }
 }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
index 542ec32..03438c6 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
@@ -66,14 +66,15 @@ public class LocalCatalog implements FeCatalog {
   private final MetaProvider metaProvider_;
   private Map<String, FeDb> dbs_ = Maps.newHashMap();
   private String nullPartitionKeyValue_;
+  private final String defaultKuduMasterHosts_;
 
-  public static FeCatalog create(String defaultKuduMasterHosts) {
-    // TODO(todd): store the kudu master hosts
-    return new LocalCatalog(new DirectMetaProvider());
+  public static LocalCatalog create(String defaultKuduMasterHosts) {
+    return new LocalCatalog(new DirectMetaProvider(), defaultKuduMasterHosts);
   }
 
-  public LocalCatalog(MetaProvider metaProvider) {
+  private LocalCatalog(MetaProvider metaProvider, String defaultKuduMasterHosts) {
     metaProvider_ = Preconditions.checkNotNull(metaProvider);
+    defaultKuduMasterHosts_ = defaultKuduMasterHosts;
   }
 
   @Override
@@ -198,7 +199,7 @@ public class LocalCatalog implements FeCatalog {
 
   @Override
   public String getDefaultKuduMasterHosts() {
-    throw new UnsupportedOperationException("TODO");
+    return defaultKuduMasterHosts_;
   }
 
   public String getNullPartitionKeyValue() {

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
index b652e0d..5a5f3bd 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
@@ -27,6 +27,8 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.analysis.NullLiteral;
 import org.apache.impala.catalog.CatalogException;
@@ -79,14 +81,26 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
    */
   private ArrayList<HashSet<Long>> nullPartitionIds_;
 
+
+  /**
+   * The value that will be stored in a partition name to indicate NULL.
+   */
+  private final String nullColumnValue_;
+
   /**
    * Map assigning integer indexes for the hosts containing blocks for this table.
    * This is updated as a side effect of LocalFsPartition.loadFileDescriptors().
    */
   private final ListMap<TNetworkAddress> hostIndex_ = new ListMap<>();
 
-  public LocalFsTable(LocalDb db, String tblName, SchemaInfo schemaInfo) {
-    super(db, tblName, schemaInfo);
+  public LocalFsTable(LocalDb db, Table msTbl) {
+    super(db, msTbl);
+
+    // set NULL indicator string from table properties
+    String tableNullFormat =
+        msTbl.getParameters().get(serdeConstants.SERIALIZATION_NULL_FORMAT);
+    nullColumnValue_ = tableNullFormat != null ? tableNullFormat :
+        FeFsTable.DEFAULT_NULL_COLUMN_VALUE;
   }
 
   @Override
@@ -188,7 +202,7 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
 
     // TODO(todd): implement avro schema support
     THdfsTable hdfsTable = new THdfsTable(getHdfsBaseDir(), getColumnNames(),
-        getNullPartitionKeyValue(), schemaInfo_.getNullColumnValue(), idToPartition,
+        getNullPartitionKeyValue(), nullColumnValue_, idToPartition,
         tPrototypePartition);
 
     TTableDescriptor tableDesc = new TTableDescriptor(tableId, TTableType.HDFS_TABLE,

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
new file mode 100644
index 0000000..e12e3ac
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
@@ -0,0 +1,202 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.catalog.local;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import javax.annotation.concurrent.Immutable;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.impala.analysis.KuduPartitionParam;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeCatalogUtils;
+import org.apache.impala.catalog.FeKuduTable;
+import org.apache.impala.catalog.KuduColumn;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TKuduTable;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTableType;
+import org.apache.impala.util.KuduUtil;
+import org.apache.kudu.ColumnSchema;
+import org.apache.kudu.Schema;
+import org.apache.kudu.client.KuduClient;
+import org.apache.kudu.client.KuduException;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+public class LocalKuduTable extends LocalTable implements FeKuduTable {
+  private final TableParams tableParams_;
+  private final List<KuduPartitionParam> partitionBy_;
+  private final org.apache.kudu.client.KuduTable kuduTable_;
+  private final ImmutableList<String> primaryKeyColumnNames_;
+
+  /**
+   * Create a new instance based on the table metadata 'msTable' stored
+   * in the metastore.
+   */
+  static LocalTable loadFromKudu(LocalDb db, Table msTable) {
+    Preconditions.checkNotNull(db);
+    Preconditions.checkNotNull(msTable);
+    String fullTableName = msTable.getDbName() + "." + msTable.getTableName();
+
+    TableParams params = new TableParams(msTable);
+    org.apache.kudu.client.KuduTable kuduTable = params.openTable();
+    List<Column> cols = new ArrayList<>();
+    List<FieldSchema> fieldSchemas = new ArrayList<>();
+    convertColsFromKudu(kuduTable.getSchema(), cols, fieldSchemas);
+
+    // TODO(todd): update the schema in HMS if it doesn't match? This will
+    // no longer be necessary after the Kudu-HMS integration is complete, so
+    // maybe not worth implementing here for the LocalCatalog implementation.
+
+    // Use the schema derived from Kudu, rather than the one stored in the HMS.
+    msTable.getSd().setCols(fieldSchemas);
+
+    ColumnMap cmap = new ColumnMap(cols, /*numClusteringCols=*/0, fullTableName);
+    return new LocalKuduTable(db, msTable, cmap, kuduTable);
+  }
+
+  private static void convertColsFromKudu(Schema schema, List<Column> cols,
+      List<FieldSchema> fieldSchemas) {
+    Preconditions.checkArgument(cols.isEmpty());
+    Preconditions.checkArgument(fieldSchemas.isEmpty());
+
+    int pos = 0;
+    for (ColumnSchema colSchema: schema.getColumns()) {
+      KuduColumn kuduCol;
+      try {
+        kuduCol = KuduColumn.fromColumnSchema(colSchema, pos++);
+      } catch (ImpalaRuntimeException e) {
+        throw new LocalCatalogException(e);
+      }
+      Preconditions.checkNotNull(kuduCol);
+      // Add the HMS column
+      fieldSchemas.add(new FieldSchema(kuduCol.getName(),
+          kuduCol.getType().toSql().toLowerCase(), /*comment=*/null));
+      cols.add(kuduCol);
+    }
+  }
+
+  private LocalKuduTable(LocalDb db, Table msTable, ColumnMap cmap,
+      org.apache.kudu.client.KuduTable kuduTable) {
+    super(db, msTable, cmap);
+    tableParams_ = new TableParams(msTable);
+    kuduTable_ = kuduTable;
+    partitionBy_ = ImmutableList.copyOf(Utils.loadPartitionByParams(
+        kuduTable));
+
+    ImmutableList.Builder<String> b = ImmutableList.builder();
+    for (ColumnSchema c: kuduTable_.getSchema().getPrimaryKeyColumns()) {
+      b.add(c.getName().toLowerCase());
+    }
+    primaryKeyColumnNames_ = b.build();
+  }
+
+  @Override
+  public String getKuduMasterHosts() {
+    return tableParams_.masters_;
+  }
+
+
+  @Override
+  public String getKuduTableName() {
+    return tableParams_.kuduTableName_;
+  }
+
+  @Override
+  public List<String> getPrimaryKeyColumnNames() {
+    return primaryKeyColumnNames_;
+  }
+
+  @Override
+  public List<KuduPartitionParam> getPartitionBy() {
+    return partitionBy_;
+  }
+
+  @Override
+  public TTableDescriptor toThriftDescriptor(int tableId,
+      Set<Long> referencedPartitions) {
+    // TODO(todd): the old implementation passes kuduTableName_ instead of name below.
+    TTableDescriptor desc = new TTableDescriptor(tableId, TTableType.KUDU_TABLE,
+        FeCatalogUtils.getTColumnDescriptors(this),
+        getNumClusteringCols(),
+        name_, db_.getName());
+    TKuduTable tbl = new TKuduTable();
+    tbl.setKey_columns(Preconditions.checkNotNull(primaryKeyColumnNames_));
+    tbl.setMaster_addresses(tableParams_.getMastersAsList());
+    tbl.setTable_name(tableParams_.kuduTableName_);
+    Preconditions.checkNotNull(partitionBy_);
+    // IMPALA-5154: partitionBy_ may be empty if Kudu table created outside Impala,
+    // partition_by must be explicitly created because the field is required.
+    tbl.partition_by = Lists.newArrayList();
+    for (KuduPartitionParam partitionParam: partitionBy_) {
+      tbl.addToPartition_by(partitionParam.toThrift());
+    }
+    desc.setKuduTable(tbl);
+    return desc;
+  }
+
+  /**
+   * Parsed parameters from the HMS indicating the cluster and table name for
+   * a Kudu table.
+   */
+  @Immutable
+  private static class TableParams {
+    private final String kuduTableName_;
+    private final String masters_;
+
+    TableParams(Table msTable) {
+      String fullTableName = msTable.getDbName() + "." + msTable.getTableName();
+      Map<String, String> params = msTable.getParameters();
+      kuduTableName_ = params.get(KuduTable.KEY_TABLE_NAME);
+      if (kuduTableName_ == null) {
+        throw new LocalCatalogException("No " + KuduTable.KEY_TABLE_NAME +
+            " property found for table " + fullTableName);
+      }
+      masters_ = params.get(KuduTable.KEY_MASTER_HOSTS);
+      if (masters_ == null) {
+        throw new LocalCatalogException("No " + KuduTable.KEY_MASTER_HOSTS +
+            " property found for table " + fullTableName);
+      }
+    }
+
+    public List<String> getMastersAsList() {
+      return Lists.newArrayList(masters_.split(","));
+    }
+
+    public org.apache.kudu.client.KuduTable openTable() {
+      KuduClient kuduClient = KuduUtil.getKuduClient(masters_);
+      org.apache.kudu.client.KuduTable kuduTable;
+      try {
+        kuduTable = kuduClient.openTable(kuduTableName_);
+      } catch (KuduException e) {
+        throw new LocalCatalogException(
+            String.format("Error opening Kudu table '%s', Kudu error: %s",
+                kuduTableName_, e.getMessage()));
+      }
+      return kuduTable;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
index d128ca2..57da5f2 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
@@ -20,19 +20,15 @@ package org.apache.impala.catalog.local;
 import java.util.ArrayList;
 import java.util.List;
 
-import javax.annotation.concurrent.Immutable;
-
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.impala.analysis.TableName;
 import org.apache.impala.catalog.ArrayType;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.DataSourceTable;
 import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeDb;
-import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsFileFormat;
@@ -63,25 +59,30 @@ abstract class LocalTable implements FeTable {
   protected final LocalDb db_;
   /** The lower-case name of the table. */
   protected final String name_;
-  protected final SchemaInfo schemaInfo_;
+
+  private final ColumnMap cols_;
+
+  protected final Table msTable_;
+
+  private final TTableStats tableStats_;
 
   public static LocalTable load(LocalDb db, String tblName) {
     // In order to know which kind of table subclass to instantiate, we need
     // to eagerly grab and parse the top-level Table object from the HMS.
-    SchemaInfo schemaInfo = SchemaInfo.load(db, tblName);
     LocalTable t = null;
-    Table msTbl = schemaInfo.msTable_;
+    Table msTbl = loadMsTable(db, tblName);
     if (TableType.valueOf(msTbl.getTableType()) == TableType.VIRTUAL_VIEW) {
-      t = new LocalView(db, tblName, schemaInfo);
+      t = new LocalView(db, msTbl);
     } else if (HBaseTable.isHBaseTable(msTbl)) {
       // TODO(todd) support HBase table
     } else if (KuduTable.isKuduTable(msTbl)) {
       // TODO(todd) support kudu table
+      t = LocalKuduTable.loadFromKudu(db, msTbl);
     } else if (DataSourceTable.isDataSourceTable(msTbl)) {
       // TODO(todd) support datasource table
     } else if (HdfsFileFormat.isHdfsInputFormatClass(
-        schemaInfo.msTable_.getSd().getInputFormat())) {
-      t = new LocalFsTable(db, tblName, schemaInfo);
+        msTbl.getSd().getInputFormat())) {
+      t = new LocalFsTable(db, msTbl);
     }
 
     if (t == null) {
@@ -96,11 +97,38 @@ abstract class LocalTable implements FeTable {
     t.loadColumnStats();
     return t;
   }
-  public LocalTable(LocalDb db, String tblName, SchemaInfo schemaInfo) {
-    this.db_ = Preconditions.checkNotNull(db);
-    this.name_ = Preconditions.checkNotNull(tblName);
-    this.schemaInfo_ = Preconditions.checkNotNull(schemaInfo);
+
+
+  /**
+   * Load the Table instance from the metastore.
+   */
+  private static Table loadMsTable(LocalDb db, String tblName) {
     Preconditions.checkArgument(tblName.toLowerCase().equals(tblName));
+
+    try {
+      return db.getCatalog().getMetaProvider().loadTable(db.getName(), tblName);
+    } catch (TException e) {
+      throw new LocalCatalogException(String.format(
+          "Could not load table %s.%s from metastore",
+          db.getName(), tblName), e);
+    }
+  }
+
+  public LocalTable(LocalDb db, Table msTbl, ColumnMap cols) {
+    this.db_ = Preconditions.checkNotNull(db);
+    this.name_ = msTbl.getTableName();
+    this.cols_ = cols;
+
+    this.msTable_ = msTbl;
+
+    tableStats_ = new TTableStats(
+        FeCatalogUtils.getRowCount(msTable_.getParameters()));
+    tableStats_.setTotal_file_bytes(
+        FeCatalogUtils.getTotalSize(msTable_.getParameters()));
+  }
+
+  public LocalTable(LocalDb db, Table msTbl) {
+    this(db, msTbl, ColumnMap.fromMsTable(msTbl));
   }
 
   @Override
@@ -110,7 +138,7 @@ abstract class LocalTable implements FeTable {
 
   @Override
   public Table getMetaStoreTable() {
-    return schemaInfo_.msTable_;
+    return msTable_;
   }
 
   @Override
@@ -142,7 +170,7 @@ abstract class LocalTable implements FeTable {
   @Override
   public ArrayList<Column> getColumns() {
     // TODO(todd) why does this return ArrayList instead of List?
-    return new ArrayList<>(schemaInfo_.colsByPos_);
+    return new ArrayList<>(cols_.colsByPos_);
   }
 
   @Override
@@ -154,40 +182,37 @@ abstract class LocalTable implements FeTable {
 
   @Override
   public List<String> getColumnNames() {
-    return Column.toColumnNames(schemaInfo_.colsByPos_);
+    return cols_.getColumnNames();
   }
 
   @Override
   public List<Column> getClusteringColumns() {
-    return ImmutableList.copyOf(
-        schemaInfo_.colsByPos_.subList(0, schemaInfo_.numClusteringCols_));
+    return cols_.getClusteringColumns();
   }
 
   @Override
   public List<Column> getNonClusteringColumns() {
-    return ImmutableList.copyOf(schemaInfo_.colsByPos_.subList(
-        schemaInfo_.numClusteringCols_,
-        schemaInfo_.colsByPos_.size()));
+    return cols_.getNonClusteringColumns();
   }
 
   @Override
   public int getNumClusteringCols() {
-    return schemaInfo_.numClusteringCols_;
+    return cols_.getNumClusteringCols();
   }
 
   @Override
   public boolean isClusteringColumn(Column c) {
-    return schemaInfo_.isClusteringColumn(c);
+    return cols_.isClusteringColumn(c);
   }
 
   @Override
   public Column getColumn(String name) {
-    return schemaInfo_.colsByName_.get(name.toLowerCase());
+    return cols_.getByName(name);
   }
 
   @Override
   public ArrayType getType() {
-    return schemaInfo_.type_;
+    return cols_.getType();
   }
 
   @Override
@@ -197,12 +222,12 @@ abstract class LocalTable implements FeTable {
 
   @Override
   public long getNumRows() {
-    return schemaInfo_.tableStats_.num_rows;
+    return tableStats_.num_rows;
   }
 
   @Override
   public TTableStats getTTableStats() {
-    return schemaInfo_.tableStats_;
+    return tableStats_;
   }
 
   protected void loadColumnStats() {
@@ -215,75 +240,75 @@ abstract class LocalTable implements FeTable {
     }
   }
 
-  /**
-   * The table schema, loaded from the HMS Table object. This is common
-   * to all Table implementations and includes the column definitions and
-   * table-level stats.
-   *
-   * TODO(todd): some of this code is lifted from 'Table' and, with some
-   * effort, could be refactored to avoid duplication.
-   */
-  @Immutable
-  protected static class SchemaInfo {
-    private final Table msTable_;
-
+  protected static class ColumnMap {
     private final ArrayType type_;
     private final ImmutableList<Column> colsByPos_;
     private final ImmutableMap<String, Column> colsByName_;
 
     private final int numClusteringCols_;
-    private final String nullColumnValue_;
 
-    private final TTableStats tableStats_;
+    public static ColumnMap fromMsTable(Table msTbl) {
+      final String fullName = msTbl.getDbName() + "." + msTbl.getTableName();
+
+      // The number of clustering columns is the number of partition keys.
+      int numClusteringCols = msTbl.getPartitionKeys().size();
+      // Add all columns to the table. Ordering is important: partition columns first,
+      // then all other columns.
+      List<Column> cols;
+      try {
+        cols = FeCatalogUtils.fieldSchemasToColumns(
+            Iterables.concat(msTbl.getPartitionKeys(),
+                             msTbl.getSd().getCols()),
+            msTbl.getTableName());
+        return new ColumnMap(cols, numClusteringCols, fullName);
+      } catch (TableLoadingException e) {
+        throw new LocalCatalogException(e);
+      }
+    }
+
+    public ColumnMap(List<Column> cols, int numClusteringCols,
+        String fullTableName) {
+      this.colsByPos_ = ImmutableList.copyOf(cols);
+      this.numClusteringCols_ = numClusteringCols;
+      colsByName_ = indexColumnNames(colsByPos_);
+      type_ = new ArrayType(columnsToStructType(colsByPos_));
 
-    /**
-     * Load the schema info from the metastore.
-     */
-    static SchemaInfo load(LocalDb db, String tblName) {
       try {
-        Table msTbl = db.getCatalog().getMetaProvider().loadTable(
-            db.getName(), tblName);
-        return new SchemaInfo(msTbl);
-      } catch (TException e) {
-        throw new LocalCatalogException(String.format(
-            "Could not load table %s.%s from metastore",
-            db.getName(), tblName), e);
+        FeCatalogUtils.validateClusteringColumns(
+            colsByPos_.subList(0, numClusteringCols_),
+            fullTableName);
       } catch (TableLoadingException e) {
-        // In this case, the exception message already has the table name
-        // in the exception message.
         throw new LocalCatalogException(e);
       }
     }
 
-    SchemaInfo(Table msTbl) throws TableLoadingException {
-      msTable_ = msTbl;
-      // set NULL indicator string from table properties
-      String tableNullFormat =
-          msTbl.getParameters().get(serdeConstants.SERIALIZATION_NULL_FORMAT);
-      nullColumnValue_ = tableNullFormat != null ? tableNullFormat :
-          FeFsTable.DEFAULT_NULL_COLUMN_VALUE;
+    public ArrayType getType() {
+      return type_;
+    }
 
-      final String fullName = msTbl.getDbName() + "." + msTbl.getTableName();
 
-      // The number of clustering columns is the number of partition keys.
-      numClusteringCols_ = msTbl.getPartitionKeys().size();
-      // Add all columns to the table. Ordering is important: partition columns first,
-      // then all other columns.
-      colsByPos_ = FeCatalogUtils.fieldSchemasToColumns(
-          Iterables.concat(msTbl.getPartitionKeys(),
-                           msTbl.getSd().getCols()),
-          fullName);
-      FeCatalogUtils.validateClusteringColumns(
-          colsByPos_.subList(0, numClusteringCols_), fullName);
-      colsByName_ = indexColumnNames(colsByPos_);
-      type_ = new ArrayType(columnsToStructType(colsByPos_));
+    public Column getByName(String name) {
+      return colsByName_.get(name.toLowerCase());
+    }
 
-      tableStats_ = new TTableStats(
-          FeCatalogUtils.getRowCount(msTable_.getParameters()));
-      tableStats_.setTotal_file_bytes(
-          FeCatalogUtils.getTotalSize(msTable_.getParameters()));
+    public int getNumClusteringCols() {
+      return numClusteringCols_;
     }
 
+
+    public List<Column> getNonClusteringColumns() {
+      return colsByPos_.subList(numClusteringCols_, colsByPos_.size());
+    }
+
+    public List<Column> getClusteringColumns() {
+      return colsByPos_.subList(0, numClusteringCols_);
+    }
+
+    public List<String> getColumnNames() {
+      return Column.toColumnNames(colsByPos_);
+    }
+
+
     private static StructType columnsToStructType(List<Column> cols) {
       ArrayList<StructField> fields = Lists.newArrayListWithCapacity(cols.size());
       for (Column col : cols) {
@@ -300,13 +325,9 @@ abstract class LocalTable implements FeTable {
       return builder.build();
     }
 
-    private boolean isClusteringColumn(Column c) {
+    boolean isClusteringColumn(Column c) {
       Preconditions.checkArgument(colsByPos_.get(c.getPosition()) == c);
       return c.getPosition() < numClusteringCols_;
     }
-
-    protected String getNullColumnValue() {
-      return nullColumnValue_;
-    }
   }
 }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/catalog/local/LocalView.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalView.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalView.java
index c380eb1..1aecdd2 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalView.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalView.java
@@ -20,6 +20,7 @@ package org.apache.impala.catalog.local;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.impala.analysis.QueryStmt;
 import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.TableLoadingException;
@@ -36,8 +37,8 @@ import org.apache.impala.thrift.TTableDescriptor;
 public class LocalView extends LocalTable implements FeView {
   private final QueryStmt queryStmt_;
 
-  public LocalView(LocalDb db, String tblName, SchemaInfo schemaInfo) {
-    super(db, tblName, schemaInfo);
+  public LocalView(LocalDb db, Table msTbl) {
+    super(db, msTbl);
 
     try {
       queryStmt_ = View.parseViewDef(this);

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
index ef24f6c..4302244 100644
--- a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
@@ -28,7 +28,7 @@ import org.apache.impala.analysis.InsertStmt;
 import org.apache.impala.analysis.JoinOperator;
 import static org.apache.impala.analysis.JoinOperator.*;
 import org.apache.impala.analysis.QueryStmt;
-import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.planner.JoinNode.DistributionMode;
@@ -202,13 +202,13 @@ public class DistributedPlanner {
     if (!partitionExprs.isEmpty()
         && analyzer.setsHaveValueTransfer(inputPartition.getPartitionExprs(),
         partitionExprs, true)
-        && !(insertStmt.getTargetTable() instanceof KuduTable)) {
+        && !(insertStmt.getTargetTable() instanceof FeKuduTable)) {
       return inputFragment;
     }
 
     // Make a cost-based decision only if no user hint was supplied.
     if (!insertStmt.hasShuffleHint()) {
-      if (insertStmt.getTargetTable() instanceof KuduTable) {
+      if (insertStmt.getTargetTable() instanceof FeKuduTable) {
         // If the table is unpartitioned or all of the partition exprs are constants,
         // don't insert the exchange.
         // TODO: make a more sophisticated decision here for partitioned tables and when
@@ -246,7 +246,7 @@ public class DistributedPlanner {
     DataPartition partition;
     if (partitionExprs.isEmpty()) {
       partition = DataPartition.UNPARTITIONED;
-    } else if (insertStmt.getTargetTable() instanceof KuduTable) {
+    } else if (insertStmt.getTargetTable() instanceof FeKuduTable) {
       partition = DataPartition.kuduPartitioned(
           KuduUtil.createPartitionExpr(insertStmt, ctx_.getRootAnalyzer()));
     } else {

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
index f6d89ad..95b31a2 100644
--- a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
@@ -35,8 +35,8 @@ import org.apache.impala.analysis.SlotDescriptor;
 import org.apache.impala.analysis.SlotRef;
 import org.apache.impala.analysis.StringLiteral;
 import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.KuduColumn;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.thrift.TExplainLevel;
@@ -86,7 +86,7 @@ import com.google.common.collect.Sets;
 public class KuduScanNode extends ScanNode {
   private final static Logger LOG = LoggerFactory.getLogger(KuduScanNode.class);
 
-  private final KuduTable kuduTable_;
+  private final FeKuduTable kuduTable_;
 
   // True if this scan node should use the MT implementation in the backend.
   private boolean useMtScanNode_;
@@ -104,7 +104,7 @@ public class KuduScanNode extends ScanNode {
 
   public KuduScanNode(PlanNodeId id, TupleDescriptor desc, List<Expr> conjuncts) {
     super(id, desc, "SCAN KUDU");
-    kuduTable_ = (KuduTable) desc_.getTable();
+    kuduTable_ = (FeKuduTable) desc_.getTable();
     conjuncts_ = conjuncts;
   }
 

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/planner/Planner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/Planner.java b/fe/src/main/java/org/apache/impala/planner/Planner.java
index 829325b..9948840 100644
--- a/fe/src/main/java/org/apache/impala/planner/Planner.java
+++ b/fe/src/main/java/org/apache/impala/planner/Planner.java
@@ -32,9 +32,9 @@ import org.apache.impala.analysis.JoinOperator;
 import org.apache.impala.analysis.QueryStmt;
 import org.apache.impala.analysis.SortInfo;
 import org.apache.impala.analysis.TupleId;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.PrintUtils;
 import org.apache.impala.common.RuntimeEnv;
@@ -184,7 +184,7 @@ public class Planner {
         List<Expr> exprs = Lists.newArrayList();
         FeTable targetTable = insertStmt.getTargetTable();
         Preconditions.checkNotNull(targetTable);
-        if (targetTable instanceof KuduTable) {
+        if (targetTable instanceof FeKuduTable) {
           if (ctx_.isInsert()) {
             // For insert statements on Kudu tables, we only need to consider
             // the labels of columns mentioned in the column list.
@@ -620,7 +620,7 @@ public class Planner {
     List<Expr> orderingExprs = Lists.newArrayList();
 
     boolean partialSort = false;
-    if (insertStmt.getTargetTable() instanceof KuduTable) {
+    if (insertStmt.getTargetTable() instanceof FeKuduTable) {
       // Always sort if the 'clustered' hint is present. Otherwise, don't sort if either
       // the 'noclustered' hint is present, or this is a single node exec, or if the
       // target table is unpartitioned.

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
index 9b7111f..45e2351 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
@@ -56,9 +56,9 @@ import org.apache.impala.catalog.ColumnStats;
 import org.apache.impala.catalog.FeDataSourceTable;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
@@ -1311,7 +1311,7 @@ public class SingleNodePlanner {
     } else if (table instanceof HBaseTable) {
       // HBase table
       scanNode = new HBaseScanNode(ctx_.getNextNodeId(), tblRef.getDesc());
-    } else if (tblRef.getTable() instanceof KuduTable) {
+    } else if (tblRef.getTable() instanceof FeKuduTable) {
       scanNode = new KuduScanNode(ctx_.getNextNodeId(), tblRef.getDesc(), conjuncts);
       scanNode.init(analyzer);
       return scanNode;


[04/10] impala git commit: IMPALA-5031: Fix undefined behavior: memset NULL

Posted by ar...@apache.org.
IMPALA-5031: Fix undefined behavior: memset NULL

memset has undefined behavior when its first argument is NULL. The
instance fixed here was found by Clang's undefined behavior sanitizer.

It was found in the end-to-end tests. The interesting part of the
stack trace is:

be/src/util/bitmap.h:78:12: runtime error: null pointer passed as argument 1, which is declared to never be null
/usr/include/string.h:62:79: note: nonnull attribute specified here
    #0 0x2ccb59b in Bitmap::SetAllBits(bool) be/src/util/bitmap.h:78:5
    #1 0x2cb6b9e in NestedLoopJoinNode::ResetMatchingBuildRows(RuntimeState*, long) be/src/exec/nested-loop-join-node.cc:176:27
    #2 0x2cb5ad6 in NestedLoopJoinNode::Open(RuntimeState*) be/src/exec/nested-loop-join-node.cc:90:43

Change-Id: I804f642f4be3b74c24f871f656c5147ee226d2c8
Reviewed-on: http://gerrit.cloudera.org:8080/11042
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/cdc8b9ba
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/cdc8b9ba
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/cdc8b9ba

Branch: refs/heads/master
Commit: cdc8b9ba78e6ac336160a916a630ec2b99e3d9f8
Parents: b76207c
Author: Jim Apple <jb...@apache.org>
Authored: Tue Jul 24 15:40:56 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Wed Jul 25 06:29:14 2018 +0000

----------------------------------------------------------------------
 be/src/util/bitmap.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/cdc8b9ba/be/src/util/bitmap.h
----------------------------------------------------------------------
diff --git a/be/src/util/bitmap.h b/be/src/util/bitmap.h
index b2f7f72..ced824b 100644
--- a/be/src/util/bitmap.h
+++ b/be/src/util/bitmap.h
@@ -20,6 +20,7 @@
 #define IMPALA_UTIL_BITMAP_H
 
 #include "util/bit-util.h"
+#include "util/ubsan.h"
 
 namespace impala {
 
@@ -75,7 +76,7 @@ class Bitmap {
   }
 
   void SetAllBits(bool b) {
-    memset(buffer_.data(), 255 * b, buffer_.size() * sizeof(uint64_t));
+    Ubsan::MemSet(buffer_.data(), 255 * b, buffer_.size() * sizeof(uint64_t));
   }
 
   int64_t num_bits() const { return num_bits_; }


[05/10] impala git commit: IMPALA-5607: Part 1 [DOCS] Return type changes for EXTRACT and DATE_PART

Posted by ar...@apache.org.
IMPALA-5607: Part 1 [DOCS] Return type changes for EXTRACT and DATE_PART

Change-Id: I0a7b1c2f984aac1d6d84480523d07a4125de1f52
Reviewed-on: http://gerrit.cloudera.org:8080/11044
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Reviewed-by: Jinchul Kim <ji...@gmail.com>
Reviewed-by: Tim Armstrong <ta...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/73be1540
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/73be1540
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/73be1540

Branch: refs/heads/master
Commit: 73be15409a6fffe45b1423e847c0d2e737c186ca
Parents: cdc8b9b
Author: Alex Rodoni <ar...@cloudera.com>
Authored: Tue Jul 24 18:22:37 2018 -0700
Committer: Alex Rodoni <ar...@cloudera.com>
Committed: Wed Jul 25 15:46:12 2018 +0000

----------------------------------------------------------------------
 docs/topics/impala_datetime_functions.xml | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/73be1540/docs/topics/impala_datetime_functions.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_datetime_functions.xml b/docs/topics/impala_datetime_functions.xml
index 7c8d5e1..da52cc8 100644
--- a/docs/topics/impala_datetime_functions.xml
+++ b/docs/topics/impala_datetime_functions.xml
@@ -293,7 +293,7 @@ select date_add(cast('2016-01-31' as timestamp), interval 3 months) as 'april_31
           with the argument order reversed. Supports the same date and time units as <codeph>EXTRACT()</codeph>.
           For compatibility with SQL code containing vendor extensions.
           <p>
-            <b>Return type:</b> <codeph>int</codeph>
+            <b>Return type:</b> <codeph>bigint</codeph>
           </p>
           <p conref="../shared/impala_common.xml#common/example_blurb"/>
 <codeblock>
@@ -816,22 +816,24 @@ select now() as right_now, days_sub(now(), 31) as 31_days_ago;
         <dd>
           <indexterm audience="hidden">extract() function</indexterm>
           <b>Purpose:</b> Returns one of the numeric date or time fields from a
-            <codeph>TIMESTAMP</codeph> value.
-          <p>
+            <codeph>TIMESTAMP</codeph> value. <p>
             <b>Unit argument:</b> The <codeph>unit</codeph> string can be one of
               <codeph>epoch</codeph>, <codeph>year</codeph>,
               <codeph>quarter</codeph>, <codeph>month</codeph>,
               <codeph>day</codeph>, <codeph>hour</codeph>,
               <codeph>minute</codeph>, <codeph>second</codeph>, or
               <codeph>millisecond</codeph>. This argument value is
-            case-insensitive.
-          </p>
-          <p rev="2.0.0"> In Impala 2.0 and higher, you can use special syntax
-            rather than a regular function call, for compatibility with code
-            that uses the SQL-99 format with the <codeph>FROM</codeph> keyword.
-            With this style, the unit names are identifiers rather than
-              <codeph>STRING</codeph> literals. For example, the following calls
-            are both equivalent:
+            case-insensitive. </p><p>If you specify <codeph>millisecond</codeph>
+            for the <codeph>unit</codeph> argument, the function returns the
+            seconds component and the milliseconds component. For example,
+              <codeph>extract(cast('2006-05-12 18:27:28.123456789' as
+              timestamp), 'MILLISECOND')</codeph> will return
+              <codeph>28123</codeph>. </p><p rev="2.0.0"> In Impala 2.0 and
+            higher, you can use special syntax rather than a regular function
+            call, for compatibility with code that uses the SQL-99 format with
+            the <codeph>FROM</codeph> keyword. With this style, the unit names
+            are identifiers rather than <codeph>STRING</codeph> literals. For
+            example, the following calls are both equivalent:
             <codeblock>extract(year from now());
 extract(now(), "year");
 </codeblock>
@@ -847,7 +849,7 @@ extract(now(), "year");
               <codeph>TRUNC()</codeph> function instead. </p>
           <p>
             <b>Return type:</b>
-            <codeph>int</codeph>
+            <codeph>bigint</codeph>
           </p>
           <p conref="../shared/impala_common.xml#common/example_blurb"/>
           <codeblock>


[02/10] impala git commit: IMPALA-5826 IMPALA-7162: [DOCS] Documented the IDLE_SESSION_TIMEOUT query option

Posted by ar...@apache.org.
IMPALA-5826 IMPALA-7162: [DOCS] Documented the IDLE_SESSION_TIMEOUT query option

Also, clarified cancelled queries vs closed queries

Change-Id: I37182a3c5cf19fdcbb5f247ed71d43f963143510
Reviewed-on: http://gerrit.cloudera.org:8080/11004
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/21d0c06a
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/21d0c06a
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/21d0c06a

Branch: refs/heads/master
Commit: 21d0c06a4cd1963e386f2467c4c9a499facb5c3f
Parents: 02389d4
Author: Alex Rodoni <ar...@cloudera.com>
Authored: Fri Jul 20 15:09:13 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Tue Jul 24 19:35:25 2018 +0000

----------------------------------------------------------------------
 docs/impala.ditamap                         |   1 +
 docs/shared/impala_common.xml               |  19 +++--
 docs/topics/impala_idle_session_timeout.xml | 100 +++++++++++++++++++++++
 docs/topics/impala_timeouts.xml             |  90 ++++++++++++++------
 4 files changed, 177 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/21d0c06a/docs/impala.ditamap
----------------------------------------------------------------------
diff --git a/docs/impala.ditamap b/docs/impala.ditamap
index 7e8b106..19c72a6 100644
--- a/docs/impala.ditamap
+++ b/docs/impala.ditamap
@@ -192,6 +192,7 @@ under the License.
           <topicref href="topics/impala_explain_level.xml"/>
           <topicref href="topics/impala_hbase_cache_blocks.xml"/>
           <topicref href="topics/impala_hbase_caching.xml"/>
+          <topicref href="topics/impala_idle_session_timeout.xml"/>
           <topicref href="topics/impala_kudu_read_mode.xml"/>
           <topicref href="topics/impala_live_progress.xml"/>
           <topicref href="topics/impala_live_summary.xml"/>

http://git-wip-us.apache.org/repos/asf/impala/blob/21d0c06a/docs/shared/impala_common.xml
----------------------------------------------------------------------
diff --git a/docs/shared/impala_common.xml b/docs/shared/impala_common.xml
index abf00b8..7dbb9bf 100644
--- a/docs/shared/impala_common.xml
+++ b/docs/shared/impala_common.xml
@@ -3771,13 +3771,20 @@ sudo pip-python install ssl</codeblock>
 
       <note id="timeout_clock_blurb">
         <p>
-          The timeout clock for queries and sessions only starts ticking when the query or session is idle.
-          For queries, this means the query has results ready but is waiting for a client to fetch the data. A
-          query can run for an arbitrary time without triggering a timeout, because the query is computing results
-          rather than sitting idle waiting for the results to be fetched. The timeout period is intended to prevent
-          unclosed queries from consuming resources and taking up slots in the admission count of running queries,
-          potentially preventing other queries from starting.
+          The timeout clock for queries and sessions only starts ticking when
+          the query or session is idle.
         </p>
+
+        <p>
+          For queries, this means the query has results ready but is waiting
+          for a client to fetch the data. A query can run for an arbitrary time
+          without triggering a timeout, because the query is computing results
+          rather than sitting idle waiting for the results to be fetched. The
+          timeout period is intended to prevent unclosed queries from consuming
+          resources and taking up slots in the admission count of running
+          queries, potentially preventing other queries from starting.
+        </p>
+
         <p>
           For sessions, this means that no query has been submitted for some period of time.
         </p>

http://git-wip-us.apache.org/repos/asf/impala/blob/21d0c06a/docs/topics/impala_idle_session_timeout.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_idle_session_timeout.xml b/docs/topics/impala_idle_session_timeout.xml
new file mode 100644
index 0000000..ac718ed
--- /dev/null
+++ b/docs/topics/impala_idle_session_timeout.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<!DOCTYPE concept PUBLIC "-//OASIS//DTD DITA Concept//EN" "concept.dtd">
+<concept rev="2.12.0" id="idle_session_timeout">
+
+  <title>IDLE_SESSION_TIMEOUT Query Option (<keyword keyref="impala212_full"/> or higher only)</title>
+
+  <titlealts audience="PDF">
+
+    <navtitle>IDLE_SESSION_TIMEOUT</navtitle>
+
+  </titlealts>
+
+  <prolog>
+    <metadata>
+      <data name="Category" value="Impala"/>
+      <data name="Category" value="Impala Query Options"/>
+      <data name="Category" value="Querying"/>
+      <data name="Category" value="Developers"/>
+      <data name="Category" value="Data Analysts"/>
+    </metadata>
+  </prolog>
+
+  <conbody>
+
+    <p rev="2.12.0">
+      The <codeph>IDLE_SESSION_TIMEOUT</codeph> query option sets the time in seconds after
+      which an idle session is cancelled. A session is idle when no activity is occurring for
+      any of the queries in that session, and the session has not started any new queries. Once
+      a session is expired, you cannot issue any new query requests to it. The session remains
+      open, but the only operation you can perform is to close it.
+    </p>
+
+    <p rev="2.12.0">
+      The <codeph>IDLE_SESSION_TIMEOUT</codeph> query option overrides the
+      <codeph>--idle_session_timeout</codeph> startup option. See
+      <xref href="impala_timeouts.xml#timeouts"/> for the
+      <codeph>--idle_session_timeout</codeph> startup option.
+    </p>
+
+    <p>
+      The <codeph>IDLE_SESSION_TIMEOUT</codeph> query option allows JDBC/ODBC connections to set
+      the session timeout as a query option with the <codeph>SET</codeph> statement.
+    </p>
+
+    <p>
+      <b>Syntax:</b>
+    </p>
+
+<codeblock>SET IDLE_SESSION_TIMEOUT=<varname>seconds</varname>;</codeblock>
+
+    <p>
+      <b>Type:</b> numeric
+    </p>
+
+    <p>
+      <b>Default:</b> 0
+      <ul>
+        <li>
+          If <codeph>--idle_session_timeout</codeph> is not set, the session never expires.
+        </li>
+
+        <li>
+          If <codeph>--idle_session_timeout</codeph> is set, use that timeout value.
+        </li>
+      </ul>
+    </p>
+
+    <p>
+      <b>Added in:</b> <keyword keyref="impala212_full"/>
+    </p>
+
+    <p>
+      <b>Related information:</b>
+    </p>
+
+    <p>
+      <xref href="impala_timeouts.xml#timeouts"/>
+    </p>
+
+  </conbody>
+
+</concept>

http://git-wip-us.apache.org/repos/asf/impala/blob/21d0c06a/docs/topics/impala_timeouts.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_timeouts.xml b/docs/topics/impala_timeouts.xml
index 80e5c9b..3389f5a 100644
--- a/docs/topics/impala_timeouts.xml
+++ b/docs/topics/impala_timeouts.xml
@@ -96,38 +96,59 @@ Trying to re-register with state-store</codeblock>
       <note conref="../shared/impala_common.xml#common/timeout_clock_blurb"/>
 
       <p>
-        Specify the following startup options for the <cmdname>impalad</cmdname> daemon:
+        Use the following startup options for the <cmdname>impalad</cmdname>
+        daemon to specify timeout values:
       </p>
 
       <ul>
-        <li>
+        <li><codeph>--idle_query_timeout</codeph>
           <p>
-            The <codeph>--idle_query_timeout</codeph> option specifies the time in seconds after
-            which an idle query is cancelled. This could be a query whose results were all fetched
-            but was never closed, or one whose results were partially fetched and then the client
-            program stopped requesting further results. This condition is most likely to occur in
-            a client program using the JDBC or ODBC interfaces, rather than in the interactive
-            <cmdname>impala-shell</cmdname> interpreter. Once the query is cancelled, the client
-            program cannot retrieve any further results.
+            Specifies the time in
+            seconds after which an idle query is cancelled. This could be a
+            query whose results were all fetched but was never closed, or one
+            whose results were partially fetched and then the client program
+            stopped requesting further results. This condition is most likely to
+            occur in a client program using the JDBC or ODBC interfaces, rather
+            than in the interactive <cmdname>impala-shell</cmdname> interpreter.
+            Once a query is cancelled, the client program cannot retrieve any
+            further results from the query.
           </p>
-
           <p rev="2.0.0">
-            You can reduce the idle query timeout by using the <codeph>QUERY_TIMEOUT_S</codeph>
-            query option. Any non-zero value specified for the <codeph>--idle_query_timeout</codeph> startup
-            option serves as an upper limit for the <codeph>QUERY_TIMEOUT_S</codeph> query option.
-            A zero value for <codeph>--idle_query_timeout</codeph> disables query timeouts.
-            See <xref href="impala_query_timeout_s.xml#query_timeout_s"/> for details.
+            You can reduce
+            the idle query timeout by using the <codeph>QUERY_TIMEOUT_S</codeph>
+            query option. Any non-zero value specified for the
+              <codeph>--idle_query_timeout</codeph> startup option serves as an
+            upper limit for the <codeph>QUERY_TIMEOUT_S</codeph> query option.
+            See <xref href="impala_query_timeout_s.xml#query_timeout_s"/> about
+            the query option.
+          </p>
+          <p rev="2.0.0">A zero value for
+              <codeph>--idle_query_timeout</codeph> disables query timeouts.
+          </p>
+          <p>
+            Cancelled queries remain in the open state but use only the
+            minimal resources.
           </p>
         </li>
 
-        <li>
+        <li><codeph>--idle_session_timeout</codeph>
+          <p>
+            Specifies the time in
+            seconds after which an idle session expires. A session is idle when
+            no activity is occurring for any of the queries in that session, and
+            the session has not started any new queries. Once a session is
+            expired, you cannot issue any new query requests to it. The session
+            remains open, but the only operation you can perform is to close it.
+          </p>
           <p>
-            The <codeph>--idle_session_timeout</codeph> option specifies the time in seconds after
-            which an idle session is expired. A session is idle when no activity is occurring for
-            any of the queries in that session, and the session has not started any new queries.
-            Once a session is expired, you cannot issue any new query requests to it. The session
-            remains open, but the only operation you can perform is to close it. The default value
-            of 0 means that sessions never expire.
+            The default value of 0 specifies sessions never
+            expire.
+          </p>
+          <p rev="2.12.0">
+            You can override the
+              <codeph>--idle_session_timeout</codeph> value with the <xref
+              href="impala_idle_session_timeout.xml#idle_session_timeout"/> at
+            the session level.
           </p>
         </li>
       </ul>
@@ -185,11 +206,26 @@ Trying to re-register with state-store</codeblock>
     <conbody>
 
       <p>
-        Sometimes, an Impala query might run for an unexpectedly long time, tying up resources
-        in the cluster. You can cancel the query explicitly, independent of the timeout period,
-        by going into the web UI for the <cmdname>impalad</cmdname> host (on port 25000 by
-        default), and using the link on the <codeph>/queries</codeph> tab to cancel the running
-        query. For example, press <codeph>^C</codeph> in <cmdname>impala-shell</cmdname>.
+        Sometimes, an Impala query might run for an unexpectedly long time,
+        tying up resources in the cluster. You can cancel the query explicitly,
+        independent of the timeout period, by going into the web UI for the
+          <cmdname>impalad</cmdname> host (on port 25000 by default), and using
+        the link on the <codeph>/queries</codeph> tab to cancel the running
+        query.
+      </p>
+
+      <p>
+        Various client applications let you interactively cancel queries
+        submitted or monitored through those applications. For example:
+        <ul>
+          <li>
+            Press <systemoutput>^C</systemoutput> in
+            <cmdname>impala-shell</cmdname>.
+          </li>
+          <li>
+            Click <b>Cancel</b> from the <b>Watch</b> page in Hue.
+          </li>
+        </ul>
       </p>
 
     </conbody>


[10/10] impala git commit: IMPALA-7277. Support INSERT and LOAD DATA statements in LocalCatalog

Posted by ar...@apache.org.
IMPALA-7277. Support INSERT and LOAD DATA statements in LocalCatalog

This adds support for INSERT and LOAD DATA statements when LocalCatalog
is enabled by fixing the following items:

* Remove some downcasts to HdfsTable in various Stmt classes and replace
  them with casts to FeFsTable.
* Stub out the "write access" checks to fake always being writable. Left
  a TODO to properly implement these.
* Implemented various 'getPartition()' calls which take a user-provided
  partition specification, used by INSERT and LOAD statements that
  specify an explicit target partition.
* Fixed the "prototype partition" created by LocalFsTable to not include
  a location field. This fixed insertion into dynamic partitions.

Additionally fixed a couple other issues which were exercised by the
e2e test coverage for load/insert:

* The LocalCatalog getDb() and getTable() calls were previously assuming
  that all callers passed pre-canonicalized table names, but that wasn't
  the case. This adds the necessary toLower() calls so that statements
  referencing capitalized table names work.

With this patch, most of the associated e2e tests pass, with the
exception of those that check permissions levels of the target
directories. Those calls are still stubbed out.

Overall, running 'run-tests.py -k "not hbase and not avro"' results in
about a 90% pass rate after this patch:

=====================================================
186 failed, 1657 passed, 56 skipped, 33 xfailed, 1 xpassed in 512.75 seconds
=====================================================

Remaining test failures seem mostly unrelated to the above changes and
will be addressed in continuing patches.

Change-Id: I4ae47a0b58022ed77abe51d2596c2b1d0111fae3
Reviewed-on: http://gerrit.cloudera.org:8080/10914
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Reviewed-by: Todd Lipcon <to...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/cec33fa0
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/cec33fa0
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/cec33fa0

Branch: refs/heads/master
Commit: cec33fa0ae75392668273d40b5a1bc4bbd7e9e2e
Parents: ba81386
Author: Todd Lipcon <to...@cloudera.com>
Authored: Tue Jul 10 16:24:41 2018 -0700
Committer: Todd Lipcon <to...@apache.org>
Committed: Wed Jul 25 19:27:26 2018 +0000

----------------------------------------------------------------------
 .../AlterTableRecoverPartitionsStmt.java        |  6 ++--
 .../analysis/AlterTableSetCachedStmt.java       |  7 ++---
 .../analysis/AlterTableSetLocationStmt.java     |  6 ++--
 .../analysis/AlterTableSetRowFormatStmt.java    |  6 ++--
 .../apache/impala/analysis/BaseTableRef.java    |  9 +++---
 .../impala/analysis/ComputeStatsStmt.java       |  7 ++---
 .../org/apache/impala/analysis/InsertStmt.java  | 20 ++++++------
 .../apache/impala/analysis/LoadDataStmt.java    | 21 +++++++------
 .../apache/impala/analysis/PartitionSpec.java   |  4 +--
 .../impala/analysis/PartitionSpecBase.java      |  8 ++---
 .../org/apache/impala/analysis/TableRef.java    |  6 ++--
 .../org/apache/impala/analysis/ToSqlUtils.java  |  4 +--
 .../apache/impala/analysis/TupleDescriptor.java |  6 ++--
 .../org/apache/impala/catalog/FeFsTable.java    | 17 +++++++++++
 .../org/apache/impala/catalog/HdfsTable.java    | 29 +++++++++++++-----
 .../impala/catalog/local/LocalCatalog.java      | 32 +++++++++++++++++---
 .../apache/impala/catalog/local/LocalDb.java    |  2 +-
 .../impala/catalog/local/LocalFsPartition.java  |  7 +++--
 .../impala/catalog/local/LocalFsTable.java      | 22 +++++++++++++-
 19 files changed, 148 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
index efb8a70..d04f042 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
@@ -17,7 +17,7 @@
 
 package org.apache.impala.analysis;
 
-import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableType;
@@ -42,8 +42,8 @@ public class AlterTableRecoverPartitionsStmt extends AlterTableStmt {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
 
-    // Make sure the target table is HdfsTable.
-    if (!(table_ instanceof HdfsTable)) {
+    // Make sure the target table is an FS-backed Table.
+    if (!(table_ instanceof FeFsTable)) {
       throw new AnalysisException("ALTER TABLE RECOVER PARTITIONS " +
           "must target an HDFS table: " + tableName_);
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
index 178725d..0bcd2bf 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
@@ -19,10 +19,9 @@ package org.apache.impala.analysis;
 
 import java.util.List;
 
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetCachedParams;
@@ -63,7 +62,7 @@ public class AlterTableSetCachedStmt extends AlterTableSetStmt {
 
     FeTable table = getTargetTable();
     Preconditions.checkNotNull(table);
-    if (!(table instanceof HdfsTable)) {
+    if (!(table instanceof FeFsTable)) {
       throw new AnalysisException("ALTER TABLE SET [CACHED|UNCACHED] must target an " +
           "HDFS table: " + table.getFullName());
     }
@@ -71,7 +70,7 @@ public class AlterTableSetCachedStmt extends AlterTableSetStmt {
     if (cacheOp_.shouldCache()) {
       boolean isCacheable = true;
       PartitionSet partitionSet = getPartitionSet();
-      HdfsTable hdfsTable = (HdfsTable)table;
+      FeFsTable hdfsTable = (FeFsTable)table;
       StringBuilder nameSb = new StringBuilder();
       if (partitionSet != null) {
         List<? extends FeFsPartition> parts = partitionSet.getPartitions();

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
index b034ab5..cb46493 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -23,10 +23,10 @@ import java.util.List;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsPartition;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetLocationParams;
@@ -77,8 +77,8 @@ public class AlterTableSetLocationStmt extends AlterTableSetStmt {
 
     FeTable table = getTargetTable();
     Preconditions.checkNotNull(table);
-    if (table instanceof HdfsTable) {
-      HdfsTable hdfsTable = (HdfsTable) table;
+    if (table instanceof FeFsTable) {
+      FeFsTable hdfsTable = (FeFsTable) table;
       if (getPartitionSet() != null) {
         // Targeting a partition rather than a table.
         List<? extends FeFsPartition> partitions = getPartitionSet().getPartitions();

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
index cdc71b3..a8c9fea 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
@@ -18,9 +18,9 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.RowFormat;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -58,7 +58,7 @@ public class AlterTableSetRowFormatStmt extends AlterTableSetStmt {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
     FeTable tbl = getTargetTable();
-    if (!(tbl instanceof HdfsTable)) {
+    if (!(tbl instanceof FeFsTable)) {
       throw new AnalysisException(String.format("ALTER TABLE SET ROW FORMAT is only " +
           "supported on HDFS tables. Conflicting table: %1$s", tbl.getFullName()));
     }
@@ -75,7 +75,7 @@ public class AlterTableSetRowFormatStmt extends AlterTableSetStmt {
       }
     } else {
       HdfsFileFormat format = HdfsFileFormat.fromHdfsInputFormatClass(
-          ((HdfsTable) tbl).getMetaStoreTable().getSd().getInputFormat());
+          ((FeFsTable) tbl).getMetaStoreTable().getSd().getInputFormat());
       if (format != HdfsFileFormat.TEXT &&
           format != HdfsFileFormat.SEQUENCE_FILE) {
         throw new AnalysisException(String.format("ALTER TABLE SET ROW FORMAT is " +

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
index 3fbc612..5b80712 100644
--- a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
@@ -17,9 +17,10 @@
 
 package org.apache.impala.analysis;
 
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.common.AnalysisException;
+
 import com.google.common.base.Preconditions;
 
 /**
@@ -90,11 +91,11 @@ public class BaseTableRef extends TableRef {
    */
   private void analyzeSkipHeaderLineCount() throws AnalysisException {
     FeTable table = getTable();
-    if (!(table instanceof HdfsTable)) return;
-    HdfsTable hdfsTable = (HdfsTable)table;
+    if (!(table instanceof FeFsTable)) return;
+    FeFsTable fsTable = (FeFsTable)table;
 
     StringBuilder error = new StringBuilder();
-    hdfsTable.parseSkipHeaderLineCount(error);
+    fsTable.parseSkipHeaderLineCount(error);
     if (error.length() > 0) throw new AnalysisException(error.toString());
   }
 }

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index 4f1173e..9ce7b0d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -33,7 +33,6 @@ import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.Type;
@@ -496,8 +495,8 @@ public class ComputeStatsStmt extends StatementBase {
     } else {
       // Not computing incremental stats.
       expectAllPartitions_ = true;
-      if (table_ instanceof HdfsTable) {
-        expectAllPartitions_ = !((HdfsTable) table_).isStatsExtrapolationEnabled();
+      if (table_ instanceof FeFsTable) {
+        expectAllPartitions_ = !((FeFsTable) table_).isStatsExtrapolationEnabled();
       }
     }
 
@@ -623,7 +622,7 @@ public class ComputeStatsStmt extends StatementBase {
     }
 
     // Compute effective sampling percent.
-    long totalFileBytes = ((HdfsTable)table_).getTotalHdfsBytes();
+    long totalFileBytes = ((FeFsTable)table_).getTotalHdfsBytes();
     if (totalFileBytes > 0) {
       effectiveSamplePerc_ = (double) sampleFileBytes / (double) totalFileBytes;
     } else {

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
index 72ec828..55da237 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
@@ -26,11 +26,11 @@ import java.util.Set;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequestBuilder;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduColumn;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.catalog.View;
@@ -474,28 +474,28 @@ public class InsertStmt extends StatementBase {
       }
     }
 
-    if (table_ instanceof HdfsTable) {
-      HdfsTable hdfsTable = (HdfsTable) table_;
-      if (!hdfsTable.hasWriteAccess()) {
+    if (table_ instanceof FeFsTable) {
+      FeFsTable fsTable = (FeFsTable) table_;
+      if (!fsTable.hasWriteAccess()) {
         throw new AnalysisException(String.format("Unable to INSERT into target table " +
             "(%s) because Impala does not have WRITE access to at least one HDFS path" +
-            ": %s", targetTableName_, hdfsTable.getFirstLocationWithoutWriteAccess()));
+            ": %s", targetTableName_, fsTable.getFirstLocationWithoutWriteAccess()));
       }
       StringBuilder error = new StringBuilder();
-      hdfsTable.parseSkipHeaderLineCount(error);
+      fsTable.parseSkipHeaderLineCount(error);
       if (error.length() > 0) throw new AnalysisException(error.toString());
       try {
-        if (!FileSystemUtil.isImpalaWritableFilesystem(hdfsTable.getLocation())) {
+        if (!FileSystemUtil.isImpalaWritableFilesystem(fsTable.getLocation())) {
           throw new AnalysisException(String.format("Unable to INSERT into target " +
               "table (%s) because %s is not a supported filesystem.", targetTableName_,
-              hdfsTable.getLocation()));
+              fsTable.getLocation()));
         }
       } catch (IOException e) {
         throw new AnalysisException(String.format("Unable to INSERT into target " +
             "table (%s): %s.", targetTableName_, e.getMessage()), e);
       }
       for (int colIdx = 0; colIdx < numClusteringCols; ++colIdx) {
-        Column col = hdfsTable.getColumns().get(colIdx);
+        Column col = fsTable.getColumns().get(colIdx);
         // Hive 1.x has a number of issues handling BOOLEAN partition columns (see HIVE-6590).
         // Instead of working around the Hive bugs, INSERT is disabled for BOOLEAN
         // partitions in Impala when built against Hive 1. HIVE-6590 is currently resolved,
@@ -798,7 +798,7 @@ public class InsertStmt extends StatementBase {
    * an AnalysisException.
    */
   private void analyzeSortColumns() throws AnalysisException {
-    if (!(table_ instanceof HdfsTable)) return;
+    if (!(table_ instanceof FeFsTable)) return;
 
     sortColumns_ = AlterTableSetTblProperties.analyzeSortColumns(table_,
         table_.getMetaStoreTable().getParameters());

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
index 43ca216..6732f1f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
@@ -28,8 +28,10 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeCatalogUtils;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
@@ -105,7 +107,7 @@ public class LoadDataStmt extends StatementBase {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     dbName_ = analyzer.getTargetDbName(tableName_);
     FeTable table = analyzer.getTable(tableName_, Privilege.INSERT);
-    if (!(table instanceof HdfsTable)) {
+    if (!(table instanceof FeFsTable)) {
       throw new AnalysisException("LOAD DATA only supported for HDFS tables: " +
           dbName_ + "." + getTbl());
     }
@@ -122,7 +124,7 @@ public class LoadDataStmt extends StatementBase {
             "specified: " + dbName_ + "." + getTbl());
       }
     }
-    analyzePaths(analyzer, (HdfsTable) table);
+    analyzePaths(analyzer, (FeFsTable) table);
   }
 
   /**
@@ -134,7 +136,7 @@ public class LoadDataStmt extends StatementBase {
    * We don't check permissions for the S3AFileSystem and the AdlFileSystem due to
    * limitations with thier getAclStatus() API. (see HADOOP-13892 and HADOOP-14437)
    */
-  private void analyzePaths(Analyzer analyzer, HdfsTable hdfsTable)
+  private void analyzePaths(Analyzer analyzer, FeFsTable table)
       throws AnalysisException {
     // The user must have permission to access the source location. Since the files will
     // be moved from this location, the user needs to have all permission.
@@ -200,11 +202,12 @@ public class LoadDataStmt extends StatementBase {
 
       String noWriteAccessErrorMsg = String.format("Unable to LOAD DATA into " +
           "target table (%s) because Impala does not have WRITE access to HDFS " +
-          "location: ", hdfsTable.getFullName());
+          "location: ", table.getFullName());
 
       if (partitionSpec_ != null) {
-        HdfsPartition partition = hdfsTable.getPartition(
-            partitionSpec_.getPartitionSpecKeyValues());
+        long partId = HdfsTable.getPartition(table,
+            partitionSpec_.getPartitionSpecKeyValues()).getId();
+        FeFsPartition partition = FeCatalogUtils.loadPartition(table, partId);
         String location = partition.getLocation();
         if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
           throw new AnalysisException(noWriteAccessErrorMsg + location);
@@ -212,8 +215,8 @@ public class LoadDataStmt extends StatementBase {
       } else {
         // No specific partition specified, so we need to check write access
         // on the table as a whole.
-        if (!hdfsTable.hasWriteAccess()) {
-          throw new AnalysisException(noWriteAccessErrorMsg + hdfsTable.getLocation());
+        if (!table.hasWriteAccess()) {
+          throw new AnalysisException(noWriteAccessErrorMsg + table.getLocation());
         }
       }
     } catch (FileNotFoundException e) {

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
index 1d76df6..1a82dca 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
@@ -18,13 +18,13 @@
 package org.apache.impala.analysis;
 
 import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TPartitionKeyValue;
@@ -110,7 +110,7 @@ public class PartitionSpec extends PartitionSpecBase {
             pk.getValue().toSql(), colType.toString(), pk.getColName()));
       }
     }
-    partitionExists_ = table_.getPartition(partitionSpec_) != null;
+    partitionExists_ = HdfsTable.getPartition(table_, partitionSpec_) != null;
     if (partitionShouldExist_ != null) {
       if (partitionShouldExist_ && !partitionExists_) {
           throw new AnalysisException("Partition spec does not exist: (" +

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
index ede438b..ccdb266 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
@@ -19,8 +19,8 @@ package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 
@@ -29,7 +29,7 @@ import com.google.common.base.Preconditions;
  * specifications of related DDL operations.
  */
 public abstract class PartitionSpecBase implements ParseNode {
-  protected HdfsTable table_;
+  protected FeFsTable table_;
   protected TableName tableName_;
   protected Boolean partitionShouldExist_;
   protected Privilege privilegeRequirement_;
@@ -85,8 +85,8 @@ public abstract class PartitionSpecBase implements ParseNode {
     }
 
     // Only HDFS tables are partitioned.
-    Preconditions.checkState(table instanceof HdfsTable);
-    table_ = (HdfsTable) table;
+    Preconditions.checkState(table instanceof FeFsTable);
+    table_ = (FeFsTable) table;
     nullPartitionKeyValue_ = table_.getNullPartitionKeyValue();
   }
 

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/TableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableRef.java b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
index 98861d1..8ccc9ab 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
@@ -22,8 +22,8 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.planner.JoinNode.DistributionMode;
 import org.apache.impala.rewrite.ExprRewriter;
@@ -353,7 +353,7 @@ public class TableRef implements ParseNode {
     if (sampleParams_ == null) return;
     sampleParams_.analyze(analyzer);
     if (!(this instanceof BaseTableRef)
-        || !(resolvedPath_.destTable() instanceof HdfsTable)) {
+        || !(resolvedPath_.destTable() instanceof FeFsTable)) {
       throw new AnalysisException(
           "TABLESAMPLE is only supported on HDFS tables: " + getUniqueAlias());
     }
@@ -376,7 +376,7 @@ public class TableRef implements ParseNode {
     // BaseTableRef will always have their path resolved at this point.
     Preconditions.checkState(getResolvedPath() != null);
     if (getResolvedPath().destTable() != null &&
-        !(getResolvedPath().destTable() instanceof HdfsTable)) {
+        !(getResolvedPath().destTable() instanceof FeFsTable)) {
       analyzer.addWarning("Table hints only supported for Hdfs tables");
     }
     for (PlanHint hint: tableHints_) {

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index a669caf..dca6b94 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.ql.parse.HiveLexer;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
@@ -48,7 +49,6 @@ import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsCompression;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduColumn;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.RowFormat;
@@ -286,7 +286,7 @@ public class ToSqlUtils {
         // We shouldn't output the columns for external tables
         colsSql = null;
       }
-    } else if (table instanceof HdfsTable) {
+    } else if (table instanceof FeFsTable) {
       String inputFormat = msTable.getSd().getInputFormat();
       format = HdfsFileFormat.fromHdfsInputFormatClass(inputFormat);
       compression = HdfsCompression.fromHdfsInputFormatClass(inputFormat);

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
index 22693ad..bf2d310 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
@@ -26,9 +26,9 @@ import java.util.Map;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.StructType;
 import org.apache.impala.thrift.TTupleDescriptor;
 
@@ -335,9 +335,9 @@ public class TupleDescriptor {
    */
   public boolean hasClusteringColsOnly() {
     FeTable table = getTable();
-    if (!(table instanceof HdfsTable) || table.getNumClusteringCols() == 0) return false;
+    if (!(table instanceof FeFsTable) || table.getNumClusteringCols() == 0) return false;
 
-    HdfsTable hdfsTable = (HdfsTable)table;
+    FeFsTable hdfsTable = (FeFsTable)table;
     for (SlotDescriptor slotDesc: getSlots()) {
       if (!slotDesc.isMaterialized()) continue;
       if (slotDesc.getColumn() == null ||

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
index 73a7ffe..86b41f0 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
@@ -103,6 +103,22 @@ public interface FeFsTable extends FeTable {
   public HdfsFileFormat getMajorityFormat();
 
   /**
+   * Return true if the table may be written to.
+   */
+  public boolean hasWriteAccess();
+
+  /**
+   * Return some location found without write access for this table, useful
+   * in error messages about insufficient permissions to insert into a table.
+   *
+   * In case multiple locations are missing write access, the particular
+   * location returned is implementation-defined.
+   *
+   * Returns null if all partitions have write access.
+   */
+  public String getFirstLocationWithoutWriteAccess();
+
+  /**
    * @param totalBytes_ the known number of bytes in the table
    * @return Returns an estimated row count for the given number of file bytes
    */
@@ -181,4 +197,5 @@ public interface FeFsTable extends FeTable {
    * @return the index of hosts that store replicas of blocks of this table.
    */
   ListMap<TNetworkAddress> getHostIndex();
+
  }

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index b4bd707..0995086 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -631,6 +631,7 @@ public class HdfsTable extends Table implements FeFsTable {
   // True if Impala has HDFS write permissions on the hdfsBaseDir (for an unpartitioned
   // table) or if Impala has write permissions on all partition directories (for
   // a partitioned table).
+  @Override
   public boolean hasWriteAccess() {
     return TAccessLevelUtil.impliesWriteAccess(accessLevel_);
   }
@@ -640,6 +641,7 @@ public class HdfsTable extends Table implements FeFsTable {
    * to, or an null if none is found. For an unpartitioned table, this just
    * checks the hdfsBaseDir. For a partitioned table it checks all partition directories.
    */
+  @Override
   public String getFirstLocationWithoutWriteAccess() {
     if (getMetaStoreTable() == null) return null;
 
@@ -662,13 +664,18 @@ public class HdfsTable extends Table implements FeFsTable {
    * was found.
    */
   public HdfsPartition getPartition(List<PartitionKeyValue> partitionSpec) {
+    return (HdfsPartition)getPartition(this, partitionSpec);
+  }
+
+  public static PrunablePartition getPartition(FeFsTable table,
+      List<PartitionKeyValue> partitionSpec) {
     List<TPartitionKeyValue> partitionKeyValues = Lists.newArrayList();
     for (PartitionKeyValue kv: partitionSpec) {
       String value = PartitionKeyValue.getPartitionKeyValueString(
-          kv.getLiteralValue(), getNullPartitionKeyValue());
+          kv.getLiteralValue(), table.getNullPartitionKeyValue());
       partitionKeyValues.add(new TPartitionKeyValue(kv.getColName(), value));
     }
-    return getPartitionFromThriftPartitionSpec(partitionKeyValues);
+    return getPartitionFromThriftPartitionSpec(table, partitionKeyValues);
   }
 
   /**
@@ -677,11 +684,17 @@ public class HdfsTable extends Table implements FeFsTable {
    */
   public HdfsPartition getPartitionFromThriftPartitionSpec(
       List<TPartitionKeyValue> partitionSpec) {
-    // First, build a list of the partition values to search for in the same order they
+    return (HdfsPartition)getPartitionFromThriftPartitionSpec(this, partitionSpec);
+  }
+
+  public static PrunablePartition getPartitionFromThriftPartitionSpec(
+      FeFsTable table,
+      List<TPartitionKeyValue> partitionSpec) {
+      // First, build a list of the partition values to search for in the same order they
     // are defined in the table.
     List<String> targetValues = Lists.newArrayList();
     Set<String> keys = Sets.newHashSet();
-    for (FieldSchema fs: getMetaStoreTable().getPartitionKeys()) {
+    for (FieldSchema fs: table.getMetaStoreTable().getPartitionKeys()) {
       for (TPartitionKeyValue kv: partitionSpec) {
         if (fs.getName().toLowerCase().equals(kv.getName().toLowerCase())) {
           targetValues.add(kv.getValue());
@@ -695,27 +708,27 @@ public class HdfsTable extends Table implements FeFsTable {
 
     // Make sure the number of values match up and that some values were found.
     if (targetValues.size() == 0 ||
-        (targetValues.size() != getMetaStoreTable().getPartitionKeysSize())) {
+        (targetValues.size() != table.getMetaStoreTable().getPartitionKeysSize())) {
       return null;
     }
 
     // Search through all the partitions and check if their partition key values
     // match the values being searched for.
-    for (HdfsPartition partition: partitionMap_.values()) {
+    for (PrunablePartition partition: table.getPartitions()) {
       List<LiteralExpr> partitionValues = partition.getPartitionValues();
       Preconditions.checkState(partitionValues.size() == targetValues.size());
       boolean matchFound = true;
       for (int i = 0; i < targetValues.size(); ++i) {
         String value;
         if (partitionValues.get(i) instanceof NullLiteral) {
-          value = getNullPartitionKeyValue();
+          value = table.getNullPartitionKeyValue();
         } else {
           value = partitionValues.get(i).getStringValue();
           Preconditions.checkNotNull(value);
           // See IMPALA-252: we deliberately map empty strings on to
           // NULL when they're in partition columns. This is for
           // backwards compatibility with Hive, and is clearly broken.
-          if (value.isEmpty()) value = getNullPartitionKeyValue();
+          if (value.isEmpty()) value = table.getNullPartitionKeyValue();
         }
         if (!targetValues.get(i).equals(value)) {
           matchFound = false;

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
index 0d376b3..c630b5c 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
@@ -31,20 +31,25 @@ import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.DatabaseNotFoundException;
 import org.apache.impala.catalog.Db;
 import org.apache.impala.catalog.FeCatalog;
+import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeDataSource;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Function.CompareMode;
-import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
 import org.apache.impala.catalog.HdfsCachePool;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.PartitionNotFoundException;
+import org.apache.impala.catalog.PrunablePartition;
 import org.apache.impala.thrift.TCatalogObject;
 import org.apache.impala.thrift.TPartitionKeyValue;
 import org.apache.impala.thrift.TUniqueId;
 import org.apache.impala.util.PatternMatcher;
 import org.apache.thrift.TException;
 
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
 
@@ -131,7 +136,7 @@ public class LocalCatalog implements FeCatalog {
   @Override
   public FeDb getDb(String db) {
     loadDbs();
-    return dbs_.get(db);
+    return dbs_.get(db.toLowerCase());
   }
 
   private FeDb getDbOrThrow(String dbName) throws DatabaseNotFoundException {
@@ -143,11 +148,28 @@ public class LocalCatalog implements FeCatalog {
     return db;
   }
 
+  private void throwPartitionNotFound(List<TPartitionKeyValue> partitionSpec)
+      throws PartitionNotFoundException {
+    throw new PartitionNotFoundException(
+        "Partition not found: " + Joiner.on(", ").join(partitionSpec));
+  }
+
   @Override
   public FeFsPartition getHdfsPartition(
-      String db, String tbl, List<TPartitionKeyValue> partition_spec)
+      String db, String tbl, List<TPartitionKeyValue> partitionSpec)
       throws CatalogException {
-    throw new UnsupportedOperationException("TODO");
+    // TODO(todd): somewhat copy-pasted from Catalog.getHdfsPartition
+
+    FeTable table = getTable(db, tbl);
+    // This is not an FS table, throw an error.
+    if (!(table instanceof FeFsTable)) {
+      throwPartitionNotFound(partitionSpec);
+    }
+    // Get the FeFsPartition object for the given partition spec.
+    PrunablePartition partition = HdfsTable.getPartitionFromThriftPartitionSpec(
+        (FeFsTable)table, partitionSpec);
+    if (partition == null) throwPartitionNotFound(partitionSpec);
+    return FeCatalogUtils.loadPartition((FeFsTable)table, partition.getId());
   }
 
   @Override
@@ -237,4 +259,4 @@ public class LocalCatalog implements FeCatalog {
   MetaProvider getMetaProvider() {
     return metaProvider_;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
index 6b9209f..136093a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
@@ -94,7 +94,7 @@ class LocalDb implements FeDb {
   @Override
   public FeTable getTable(String tblName) {
     Preconditions.checkNotNull(tblName);
-    Preconditions.checkArgument(tblName.toLowerCase().equals(tblName));
+    tblName = tblName.toLowerCase();
     loadTableNames();
     if (!tables_.containsKey(tblName)) {
       // Table doesn't exist.

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
index c5510d5..2e35ce8 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
@@ -102,9 +102,12 @@ public class LocalFsPartition implements FeFsPartition {
 
   @Override
   public THdfsPartitionLocation getLocationAsThrift() {
+    String loc = getLocation();
+    // The special "prototype partition" has a null location.
+    if (loc == null) return null;
     // TODO(todd): support prefix-compressed partition locations. For now,
     // using -1 indicates that the location is a full path string.
-    return new THdfsPartitionLocation(/*prefix_index=*/-1, getLocation());
+    return new THdfsPartitionLocation(/*prefix_index=*/-1, loc);
   }
 
   @Override
@@ -115,7 +118,7 @@ public class LocalFsPartition implements FeFsPartition {
   @Override
   public TAccessLevel getAccessLevel() {
     // TODO(todd): implement me
-    return TAccessLevel.READ_ONLY;
+    return TAccessLevel.READ_WRITE;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/impala/blob/cec33fa0/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
index 81dcae1..35044e3 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
@@ -27,6 +27,7 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.impala.analysis.LiteralExpr;
@@ -188,6 +189,18 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
   }
 
   @Override
+  public boolean hasWriteAccess() {
+    // TODO(todd): implement me properly
+    return true;
+  }
+
+  @Override
+  public String getFirstLocationWithoutWriteAccess() {
+    // TODO(todd): implement me properly
+    return null;
+  }
+
+  @Override
   public long getExtrapolatedNumRows(long totalBytes) {
     // TODO Auto-generated method stub
     return -1;
@@ -239,7 +252,14 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
 
   private LocalFsPartition createPrototypePartition() {
     Partition protoMsPartition = new Partition();
-    protoMsPartition.setSd(getMetaStoreTable().getSd());
+
+    // The prototype partition should not have a location set in its storage
+    // descriptor, or else all inserted files will end up written into the
+    // table directory instead of the new partition directories.
+    StorageDescriptor sd = getMetaStoreTable().getSd().deepCopy();
+    sd.unsetLocation();
+    protoMsPartition.setSd(sd);
+
     protoMsPartition.setParameters(Collections.<String, String>emptyMap());
     LocalPartitionSpec spec = new LocalPartitionSpec(
         this, "", CatalogObjectsConstants.PROTOTYPE_PARTITION_ID);


[09/10] impala git commit: IMPALA-7276. Support CREATE TABLE AS SELECT with LocalCatalog

Posted by ar...@apache.org.
IMPALA-7276. Support CREATE TABLE AS SELECT with LocalCatalog

This fixes most of the remaining Kudu test failures, which were caused by tests
relying on CTAS. Now only a few Kudu tests fail:

FAIL query_test/test_kudu.py::TestKuduOperations::()::test_kudu_col_changed
FAIL query_test/test_kudu.py::TestKuduOperations::()::test_kudu_col_null_changed
FAIL query_test/test_kudu.py::TestKuduOperations::()::test_kudu_col_not_null_changed
FAIL query_test/test_kudu.py::TestKuduOperations::()::test_kudu_col_added

The above 4 fail because they are asserting something about the caching
behavior of the old catalog implementation.

FAIL query_test/test_kudu.py::TestImpalaKuduIntegration::()::test_delete_external_kudu_table
FAIL query_test/test_kudu.py::TestImpalaKuduIntegration::()::test_delete_managed_kudu_table

These fail due to attempting to load non-existent tables referred to by a
DELETE statement. These need further investigation, but the failures are not related to CTAS.

Change-Id: I93937aed9b76ef6a62b1c588c59c34d3d6831a46
Reviewed-on: http://gerrit.cloudera.org:8080/10913
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Reviewed-by: Todd Lipcon <to...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/ba813869
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/ba813869
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/ba813869

Branch: refs/heads/master
Commit: ba81386941a7178048fd1469bf6c2a371f253c3e
Parents: c333b55
Author: Todd Lipcon <to...@cloudera.com>
Authored: Tue Jul 10 13:25:15 2018 -0700
Committer: Todd Lipcon <to...@apache.org>
Committed: Wed Jul 25 19:27:26 2018 +0000

----------------------------------------------------------------------
 .../analysis/CreateTableAsSelectStmt.java       |  6 +--
 .../main/java/org/apache/impala/catalog/Db.java | 17 +++++++
 .../java/org/apache/impala/catalog/FeDb.java    | 15 ++++++
 .../impala/catalog/local/LocalCatalog.java      | 10 +++-
 .../apache/impala/catalog/local/LocalDb.java    | 19 ++++++++
 .../impala/catalog/local/LocalFsTable.java      | 33 ++++++++++++--
 .../impala/catalog/local/LocalKuduTable.java    | 48 +++++++++++++++-----
 .../apache/impala/planner/HdfsTableSink.java    | 12 ++---
 .../org/apache/impala/planner/TableSink.java    |  3 +-
 9 files changed, 135 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
index 33a271b..4753b62 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
@@ -213,13 +213,11 @@ public class CreateTableAsSelectStmt extends StatementBase {
 
       FeTable tmpTable = null;
       if (KuduTable.isKuduTable(msTbl)) {
-        // TODO(todd): avoid downcast to 'Db' here
-        tmpTable = KuduTable.createCtasTarget((Db)db, msTbl, createStmt_.getColumnDefs(),
+        tmpTable = db.createKuduCtasTarget(msTbl, createStmt_.getColumnDefs(),
             createStmt_.getPrimaryKeyColumnDefs(),
             createStmt_.getKuduPartitionParams());
       } else if (HdfsFileFormat.isHdfsInputFormatClass(msTbl.getSd().getInputFormat())) {
-        // TODO(todd): avoid downcast to 'Db' here
-        tmpTable = HdfsTable.createCtasTarget((Db)db, msTbl);
+        tmpTable = db.createFsCtasTarget(msTbl);
       }
       Preconditions.checkState(tmpTable != null &&
           (tmpTable instanceof FeFsTable || tmpTable instanceof FeKuduTable));

http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/catalog/Db.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Db.java b/fe/src/main/java/org/apache/impala/catalog/Db.java
index 5955e0a..0c3c2bd 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Db.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Db.java
@@ -29,6 +29,8 @@ import org.apache.thrift.TSerializer;
 import org.apache.thrift.protocol.TCompactProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.analysis.KuduPartitionParam;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.thrift.TCatalogObject;
@@ -170,6 +172,21 @@ public class Db extends CatalogObjectImpl implements FeDb {
     return tableCache_.remove(tableName.toLowerCase());
   }
 
+  @Override
+  public FeKuduTable createKuduCtasTarget(
+      org.apache.hadoop.hive.metastore.api.Table msTbl,
+      List<ColumnDef> columnDefs, List<ColumnDef> primaryKeyColumnDefs,
+      List<KuduPartitionParam> kuduPartitionParams) {
+    return KuduTable.createCtasTarget(this, msTbl, columnDefs, primaryKeyColumnDefs,
+        kuduPartitionParams);
+  }
+
+  @Override
+  public FeFsTable createFsCtasTarget(org.apache.hadoop.hive.metastore.api.Table msTbl)
+      throws CatalogException {
+    return HdfsTable.createCtasTarget(this, msTbl);
+  }
+
   /**
    * Comparator that sorts function overloads. We want overloads to be always considered
    * in a canonical order so that overload resolution in the case of multiple valid

http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/catalog/FeDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeDb.java b/fe/src/main/java/org/apache/impala/catalog/FeDb.java
index 111fbd0..057a2fa 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeDb.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeDb.java
@@ -19,6 +19,9 @@ package org.apache.impala.catalog;
 import java.util.List;
 
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.analysis.KuduPartitionParam;
 import org.apache.impala.thrift.TDatabase;
 import org.apache.impala.thrift.TFunctionCategory;
 import org.apache.impala.util.PatternMatcher;
@@ -98,4 +101,16 @@ public interface FeDb extends HasName {
    * @return the Thrift-serialized structure for this database
    */
   TDatabase toThrift();
+
+  /**
+   * Create a target Kudu table object for CTAS.
+   */
+  FeKuduTable createKuduCtasTarget(Table msTbl, List<ColumnDef> columnDefs,
+      List<ColumnDef> primaryKeyColumnDefs,
+      List<KuduPartitionParam> kuduPartitionParams);
+
+  /**
+   * Create a target FS table object for CTAS.
+   */
+  FeFsTable createFsCtasTarget(Table msTbl) throws CatalogException;
 }

http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
index 03438c6..0d376b3 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
@@ -37,6 +37,7 @@ import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
 import org.apache.impala.catalog.HdfsCachePool;
 import org.apache.impala.thrift.TCatalogObject;
 import org.apache.impala.thrift.TPartitionKeyValue;
@@ -184,7 +185,14 @@ public class LocalCatalog implements FeCatalog {
 
   @Override
   public Path getTablePath(Table msTbl) {
-    throw new UnsupportedOperationException("TODO");
+    // If the table did not have its path set, build the path based on the
+    // location property of the parent database.
+    if (msTbl.getSd().getLocation() == null || msTbl.getSd().getLocation().isEmpty()) {
+      String dbLocation = getDb(msTbl.getDbName()).getMetaStoreDb().getLocationUri();
+      return new Path(dbLocation, msTbl.getTableName().toLowerCase());
+    } else {
+      return new Path(msTbl.getSd().getLocation());
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
index 6a779d7..6b9209f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalDb.java
@@ -22,7 +22,13 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.analysis.KuduPartitionParam;
+import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.FeDb;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Function.CompareMode;
@@ -104,6 +110,19 @@ class LocalDb implements FeDb {
   }
 
   @Override
+  public FeKuduTable createKuduCtasTarget(Table msTbl, List<ColumnDef> columnDefs,
+      List<ColumnDef> primaryKeyColumnDefs,
+      List<KuduPartitionParam> kuduPartitionParams) {
+    return LocalKuduTable.createCtasTarget(this, msTbl, columnDefs, primaryKeyColumnDefs,
+        kuduPartitionParams);
+  }
+
+  @Override
+  public FeFsTable createFsCtasTarget(Table msTbl) throws CatalogException {
+    return LocalFsTable.createCtasTarget(this, msTbl);
+  }
+
+  @Override
   public List<String> getAllTableNames() {
     loadTableNames();
     return ImmutableList.copyOf(tables_.keySet());

http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
index 5a5f3bd..81dcae1 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
@@ -54,6 +54,7 @@ import org.apache.thrift.TException;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
@@ -103,6 +104,20 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
         FeFsTable.DEFAULT_NULL_COLUMN_VALUE;
   }
 
+  /**
+   * Creates a temporary FsTable object populated with the specified properties.
+   * This is used for CTAS statements.
+   */
+  public static LocalFsTable createCtasTarget(LocalDb db,
+      Table msTbl) throws CatalogException {
+    // TODO(todd): set a member variable indicating this is a CTAS target
+    // so we can checkState() against it in various other methods and make
+    // sure we don't try to do something like load partitions for a not-yet-created
+    // table.
+    return new LocalFsTable(db, msTbl);
+  }
+
+
   @Override
   public boolean isCacheable() {
     // TODO Auto-generated method stub
@@ -162,8 +177,14 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
 
   @Override
   public HdfsFileFormat getMajorityFormat() {
-    // Needed by HdfsTableSink.
-    throw new UnsupportedOperationException("TODO: implement me");
+    // TODO(todd): can we avoid loading all partitions here? this is called
+    // for any INSERT query, even if the partition is specified.
+    Collection<? extends FeFsPartition> parts = FeCatalogUtils.loadAllPartitions(this);
+    // In the case that we have no partitions added to the table yet, it's
+    // important to add the "prototype" partition as a fallback.
+    Iterable<FeFsPartition> partitionsToConsider = Iterables.concat(
+        parts, Collections.singleton(createPrototypePartition()));
+    return FeCatalogUtils.getMajorityFormat(partitionsToConsider);
   }
 
   @Override
@@ -184,8 +205,12 @@ public class LocalFsTable extends LocalTable implements FeFsTable {
   }
 
   @Override
-  public TTableDescriptor toThriftDescriptor(int tableId, Set<Long> referencedPartitions) {
-    Preconditions.checkNotNull(referencedPartitions);
+  public TTableDescriptor toThriftDescriptor(int tableId,
+      Set<Long> referencedPartitions) {
+    if (referencedPartitions == null) {
+      // null means "all partitions".
+      referencedPartitions = getPartitionIds();
+    }
     Map<Long, THdfsPartition> idToPartition = Maps.newHashMap();
     List<? extends FeFsPartition> partitions = loadPartitions(referencedPartitions);
     for (FeFsPartition partition : partitions) {

http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
index e12e3ac..8a6bb67 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
@@ -26,6 +26,7 @@ import javax.annotation.concurrent.Immutable;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.impala.analysis.ColumnDef;
 import org.apache.impala.analysis.KuduPartitionParam;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.FeCatalogUtils;
@@ -49,7 +50,6 @@ import com.google.common.collect.Lists;
 public class LocalKuduTable extends LocalTable implements FeKuduTable {
   private final TableParams tableParams_;
   private final List<KuduPartitionParam> partitionBy_;
-  private final org.apache.kudu.client.KuduTable kuduTable_;
   private final ImmutableList<String> primaryKeyColumnNames_;
 
   /**
@@ -74,8 +74,38 @@ public class LocalKuduTable extends LocalTable implements FeKuduTable {
     // Use the schema derived from Kudu, rather than the one stored in the HMS.
     msTable.getSd().setCols(fieldSchemas);
 
+
+    List<String> pkNames = new ArrayList<>();
+    for (ColumnSchema c: kuduTable.getSchema().getPrimaryKeyColumns()) {
+      pkNames.add(c.getName().toLowerCase());
+    }
+
+    List<KuduPartitionParam> partitionBy = Utils.loadPartitionByParams(kuduTable);
+
     ColumnMap cmap = new ColumnMap(cols, /*numClusteringCols=*/0, fullTableName);
-    return new LocalKuduTable(db, msTable, cmap, kuduTable);
+    return new LocalKuduTable(db, msTable, cmap, pkNames, partitionBy);
+  }
+
+
+  public static FeKuduTable createCtasTarget(LocalDb db, Table msTable,
+      List<ColumnDef> columnDefs, List<ColumnDef> primaryKeyColumnDefs,
+      List<KuduPartitionParam> kuduPartitionParams) {
+    String fullTableName = msTable.getDbName() + "." + msTable.getTableName();
+
+    List<Column> columns = new ArrayList<>();
+    List<String> pkNames = new ArrayList<>();
+    int pos = 0;
+    for (ColumnDef colDef: columnDefs) {
+      // TODO(todd): it seems odd that for CTAS targets, the columns are of type
+      // 'Column' instead of 'KuduColumn'.
+      columns.add(new Column(colDef.getColName(), colDef.getType(), pos++));
+    }
+    for (ColumnDef pkColDef: primaryKeyColumnDefs) {
+      pkNames.add(pkColDef.getColName());
+    }
+
+    ColumnMap cmap = new ColumnMap(columns, /*numClusteringCols=*/0, fullTableName);
+    return new LocalKuduTable(db, msTable, cmap, pkNames, kuduPartitionParams);
   }
 
   private static void convertColsFromKudu(Schema schema, List<Column> cols,
@@ -100,18 +130,12 @@ public class LocalKuduTable extends LocalTable implements FeKuduTable {
   }
 
   private LocalKuduTable(LocalDb db, Table msTable, ColumnMap cmap,
-      org.apache.kudu.client.KuduTable kuduTable) {
+      List<String> primaryKeyColumnNames,
+      List<KuduPartitionParam> partitionBy)  {
     super(db, msTable, cmap);
     tableParams_ = new TableParams(msTable);
-    kuduTable_ = kuduTable;
-    partitionBy_ = ImmutableList.copyOf(Utils.loadPartitionByParams(
-        kuduTable));
-
-    ImmutableList.Builder<String> b = ImmutableList.builder();
-    for (ColumnSchema c: kuduTable_.getSchema().getPrimaryKeyColumns()) {
-      b.add(c.getName().toLowerCase());
-    }
-    primaryKeyColumnNames_ = b.build();
+    partitionBy_ = ImmutableList.copyOf(partitionBy);
+    primaryKeyColumnNames_ = ImmutableList.copyOf(primaryKeyColumnNames);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java b/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
index 0de2edb..7426641 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
@@ -21,10 +21,9 @@ import java.util.List;
 
 import org.apache.impala.analysis.DescriptorTable;
 import org.apache.impala.analysis.Expr;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.thrift.TDataSink;
 import org.apache.impala.thrift.TDataSinkType;
 import org.apache.impala.thrift.TExplainLevel;
@@ -36,8 +35,9 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 /**
- * Base class for Hdfs data sinks such as HdfsTextTableSink.
+ * Sink for inserting into filesystem-backed tables.
  *
+ * TODO(vercegovac): rename to FsTableSink
  */
 public class HdfsTableSink extends TableSink {
   // Default number of partitions used for computeResourceProfile() in the absence of
@@ -61,7 +61,7 @@ public class HdfsTableSink extends TableSink {
   public HdfsTableSink(FeTable targetTable, List<Expr> partitionKeyExprs,
       boolean overwrite, boolean inputIsClustered, List<Integer> sortColumns) {
     super(targetTable, Op.INSERT);
-    Preconditions.checkState(targetTable instanceof HdfsTable);
+    Preconditions.checkState(targetTable instanceof FeFsTable);
     partitionKeyExprs_ = partitionKeyExprs;
     overwrite_ = overwrite;
     inputIsClustered_ = inputIsClustered;
@@ -70,7 +70,7 @@ public class HdfsTableSink extends TableSink {
 
   @Override
   public void computeResourceProfile(TQueryOptions queryOptions) {
-    HdfsTable table = (HdfsTable) targetTable_;
+    FeFsTable table = (FeFsTable) targetTable_;
     // TODO: Estimate the memory requirements more accurately by partition type.
     HdfsFileFormat format = table.getMajorityFormat();
     PlanNode inputNode = fragment_.getPlanRoot();
@@ -164,7 +164,7 @@ public class HdfsTableSink extends TableSink {
     TDataSink result = new TDataSink(TDataSinkType.TABLE_SINK);
     THdfsTableSink hdfsTableSink = new THdfsTableSink(
         Expr.treesToThrift(partitionKeyExprs_), overwrite_, inputIsClustered_);
-    HdfsTable table = (HdfsTable) targetTable_;
+    FeFsTable table = (FeFsTable) targetTable_;
     StringBuilder error = new StringBuilder();
     int skipHeaderLineCount = table.parseSkipHeaderLineCount(error);
     // Errors will be caught during analysis.

http://git-wip-us.apache.org/repos/asf/impala/blob/ba813869/fe/src/main/java/org/apache/impala/planner/TableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/TableSink.java b/fe/src/main/java/org/apache/impala/planner/TableSink.java
index 677805e..214c9b4 100644
--- a/fe/src/main/java/org/apache/impala/planner/TableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/TableSink.java
@@ -20,6 +20,7 @@ package org.apache.impala.planner;
 import java.util.List;
 
 import org.apache.impala.analysis.Expr;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
@@ -96,7 +97,7 @@ public abstract class TableSink extends DataSink {
     Preconditions.checkNotNull(partitionKeyExprs);
     Preconditions.checkNotNull(referencedColumns);
     Preconditions.checkNotNull(sortColumns);
-    if (table instanceof HdfsTable) {
+    if (table instanceof FeFsTable) {
       // Hdfs only supports inserts.
       Preconditions.checkState(sinkAction == Op.INSERT);
       // Referenced columns don't make sense for an Hdfs table.


[03/10] impala git commit: IMPALA-7330. After LOAD DATA, only refresh affected partition

Posted by ar...@apache.org.
IMPALA-7330. After LOAD DATA, only refresh affected partition

This changes LOAD DATA so that, if a specific partition is provided,
only the named partition will be refreshed upon completion of the
statement.

No new tests are added since this code path is covered by existing tests
and this just optimizes the metadata reload.

I did verify looking at the catalogd logs that only the single specified
partition was refreshed when I issued a LOAD statement from the shell.

Change-Id: I3b29846deac49a89abcd3495e4b757ef536ff331
Reviewed-on: http://gerrit.cloudera.org:8080/11014
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Reviewed-by: Todd Lipcon <to...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/b76207c5
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/b76207c5
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/b76207c5

Branch: refs/heads/master
Commit: b76207c5924f163f022264730a7b0d977c663300
Parents: 21d0c06
Author: Todd Lipcon <to...@cloudera.com>
Authored: Mon Jul 23 12:28:43 2018 -0700
Committer: Todd Lipcon <to...@apache.org>
Committed: Tue Jul 24 20:45:20 2018 +0000

----------------------------------------------------------------------
 be/src/service/client-request-state.cc | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/b76207c5/be/src/service/client-request-state.cc
----------------------------------------------------------------------
diff --git a/be/src/service/client-request-state.cc b/be/src/service/client-request-state.cc
index 896ef49..398ab59 100644
--- a/be/src/service/client-request-state.cc
+++ b/be/src/service/client-request-state.cc
@@ -180,6 +180,10 @@ Status ClientRequestState::Exec(TExecRequest* exec_request) {
       reset_req.reset_metadata_params.__set_is_refresh(true);
       reset_req.reset_metadata_params.__set_table_name(
           exec_request_.load_data_request.table_name);
+      if (exec_request_.load_data_request.__isset.partition_spec) {
+        reset_req.reset_metadata_params.__set_partition_spec(
+            exec_request_.load_data_request.partition_spec);
+      }
       reset_req.reset_metadata_params.__set_sync_ddl(
           exec_request_.query_options.sync_ddl);
       catalog_op_executor_.reset(


[07/10] impala git commit: IMPALA-7257. Support Kudu tables in LocalCatalog

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/planner/TableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/TableSink.java b/fe/src/main/java/org/apache/impala/planner/TableSink.java
index a702206..677805e 100644
--- a/fe/src/main/java/org/apache/impala/planner/TableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/TableSink.java
@@ -20,10 +20,10 @@ package org.apache.impala.planner;
 import java.util.List;
 
 import org.apache.impala.analysis.Expr;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.thrift.TSinkAction;
 
 import com.google.common.base.Preconditions;
@@ -116,7 +116,7 @@ public abstract class TableSink extends DataSink {
       Preconditions.checkState(sortColumns.isEmpty());
       // Create the HBaseTableSink and return it.
       return new HBaseTableSink(table);
-    } else if (table instanceof KuduTable) {
+    } else if (table instanceof FeKuduTable) {
       // Kudu doesn't have a way to perform INSERT OVERWRITE.
       Preconditions.checkState(overwrite == false);
       // Sort columns are not supported for Kudu tables.

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/service/Frontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index ac65171..1ff9d20 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -81,11 +81,11 @@ import org.apache.impala.catalog.FeDataSource;
 import org.apache.impala.catalog.FeDataSourceTable;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.ImpaladCatalog;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
@@ -330,7 +330,7 @@ public class Frontend {
           new TColumn("name", Type.STRING.toThrift()),
           new TColumn("type", Type.STRING.toThrift()),
           new TColumn("comment", Type.STRING.toThrift()));
-      if (descStmt.getTable() instanceof KuduTable
+      if (descStmt.getTable() instanceof FeKuduTable
           && descStmt.getOutputStyle() == TDescribeOutputStyle.MINIMAL) {
         columns.add(new TColumn("primary_key", Type.STRING.toThrift()));
         columns.add(new TColumn("nullable", Type.STRING.toThrift()));
@@ -750,11 +750,11 @@ public class Frontend {
       return ((HBaseTable) table).getTableStats();
     } else if (table instanceof FeDataSourceTable) {
       return ((FeDataSourceTable) table).getTableStats();
-    } else if (table instanceof KuduTable) {
+    } else if (table instanceof FeKuduTable) {
       if (op == TShowStatsOp.RANGE_PARTITIONS) {
-        return ((KuduTable) table).getRangePartitions();
+        return FeKuduTable.Utils.getRangePartitions((FeKuduTable) table);
       } else {
-        return ((KuduTable) table).getTableStats();
+        return FeKuduTable.Utils.getTableStats((FeKuduTable) table);
       }
     } else {
       throw new InternalException("Invalid table class: " + table.getClass());
@@ -838,7 +838,7 @@ public class Frontend {
       filteredColumns = table.getColumnsInHiveOrder();
     }
     if (outputStyle == TDescribeOutputStyle.MINIMAL) {
-      if (!(table instanceof KuduTable)) {
+      if (!(table instanceof FeKuduTable)) {
         return DescribeResultFactory.buildDescribeMinimalResult(
             Column.columnsToStruct(filteredColumns));
       }

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
index 0ce0cf9..a5b575d 100644
--- a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.KuduColumn;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Table;
@@ -362,7 +363,8 @@ public class KuduCatalogOpExecutor {
 
   private static List<Pair<PartialRow, RangePartitionBound>> getRangePartitionBounds(
       TRangePartition rangePartition, KuduTable tbl) throws ImpalaRuntimeException {
-    List<String> rangePartitioningColNames = tbl.getRangePartitioningColNames();
+    List<String> rangePartitioningColNames =
+        FeKuduTable.Utils.getRangePartitioningColNames(tbl);
     List<String> rangePartitioningKuduColNames =
       Lists.newArrayListWithCapacity(rangePartitioningColNames.size());
     for (String colName : rangePartitioningColNames) {

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/main/java/org/apache/impala/util/KuduUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/KuduUtil.java b/fe/src/main/java/org/apache/impala/util/KuduUtil.java
index 07378a9..0293de3 100644
--- a/fe/src/main/java/org/apache/impala/util/KuduUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/KuduUtil.java
@@ -31,7 +31,7 @@ import org.apache.impala.analysis.FunctionCallExpr;
 import org.apache.impala.analysis.InsertStmt;
 import org.apache.impala.analysis.KuduPartitionExpr;
 import org.apache.impala.analysis.LiteralExpr;
-import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.ScalarType;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
@@ -450,9 +450,9 @@ public class KuduUtil {
    */
   public static Expr createPartitionExpr(InsertStmt insertStmt, Analyzer analyzer)
       throws AnalysisException {
-    Preconditions.checkState(insertStmt.getTargetTable() instanceof KuduTable);
+    Preconditions.checkState(insertStmt.getTargetTable() instanceof FeKuduTable);
     Expr kuduPartitionExpr = new KuduPartitionExpr(DescriptorTable.TABLE_SINK_ID,
-        (KuduTable) insertStmt.getTargetTable(),
+        (FeKuduTable) insertStmt.getTargetTable(),
         Lists.newArrayList(insertStmt.getPartitionKeyExprs()),
         insertStmt.getPartitionColPos());
     kuduPartitionExpr.analyze(analyzer);

http://git-wip-us.apache.org/repos/asf/impala/blob/c333b552/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java b/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
index d85b8e3..30dfc5b 100644
--- a/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.*;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.impala.analysis.ToSqlUtils;
 import org.apache.impala.catalog.CatalogTest;
 import org.apache.impala.catalog.ColumnStats;
 import org.apache.impala.catalog.FeCatalogUtils;
@@ -37,9 +38,12 @@ import org.apache.impala.thrift.TCatalogObjectType;
 import org.apache.impala.thrift.TResultSet;
 import org.apache.impala.util.MetaStoreUtil;
 import org.apache.impala.util.PatternMatcher;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 
@@ -50,7 +54,7 @@ public class LocalCatalogTest {
   @Before
   public void setupCatalog() {
     FeSupport.loadLibrary();
-    catalog_ = new LocalCatalog(new DirectMetaProvider());
+    catalog_ = LocalCatalog.create(/*defaultKuduMasterHosts=*/null);
   }
 
   @Test
@@ -176,4 +180,35 @@ public class LocalCatalogTest {
     assertEquals(TCatalogObjectType.VIEW, v.getCatalogObjectType());
     assertEquals("SELECT * FROM functional.alltypes", v.getQueryStmt().toSql());
   }
+
+  @Test
+  public void testKuduTable() throws Exception {
+    LocalKuduTable t = (LocalKuduTable) catalog_.getTable("functional_kudu",  "alltypes");
+    assertEquals("id,bool_col,tinyint_col,smallint_col,int_col," +
+        "bigint_col,float_col,double_col,date_string_col,string_col," +
+        "timestamp_col,year,month", Joiner.on(",").join(t.getColumnNames()));
+    // Assert on the generated SQL for the table, but not the table properties, since
+    // those might change based on whether this test runs before or after other
+    // tests which compute stats, etc.
+    Assert.assertThat(ToSqlUtils.getCreateTableSql(t), CoreMatchers.startsWith(
+        "CREATE TABLE functional_kudu.alltypes (\n" +
+        "  id INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  bool_col BOOLEAN NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  tinyint_col TINYINT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  smallint_col SMALLINT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  int_col INT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  bigint_col BIGINT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  float_col FLOAT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  double_col DOUBLE NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  date_string_col STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  string_col STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  timestamp_col TIMESTAMP NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  year INT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  month INT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,\n" +
+        "  PRIMARY KEY (id)\n" +
+        ")\n" +
+        "PARTITION BY HASH (id) PARTITIONS 3\n" +
+        "STORED AS KUDU\n" +
+        "TBLPROPERTIES"));
+  }
 }


[06/10] impala git commit: IMPALA-7218: [DOCS] Support column list in ALTER VIEW

Posted by ar...@apache.org.
IMPALA-7218: [DOCS] Support column list in ALTER VIEW

Change-Id: I19e5cf97302a46738fd832344415fb7ad4ca0e41
Reviewed-on: http://gerrit.cloudera.org:8080/11043
Reviewed-by: Fredy Wijaya <fw...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/def5c881
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/def5c881
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/def5c881

Branch: refs/heads/master
Commit: def5c881b1dee93848daea52d93a0602e11338ca
Parents: 73be154
Author: Alex Rodoni <ar...@cloudera.com>
Authored: Tue Jul 24 17:22:32 2018 -0700
Committer: Alex Rodoni <ar...@cloudera.com>
Committed: Wed Jul 25 19:19:16 2018 +0000

----------------------------------------------------------------------
 docs/topics/impala_alter_view.xml | 87 ++++++++++++++++++++--------------
 1 file changed, 52 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/def5c881/docs/topics/impala_alter_view.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_alter_view.xml b/docs/topics/impala_alter_view.xml
index e0d21d9..08d26bb 100644
--- a/docs/topics/impala_alter_view.xml
+++ b/docs/topics/impala_alter_view.xml
@@ -21,7 +21,13 @@ under the License.
 <concept rev="1.1" id="alter_view">
 
   <title>ALTER VIEW Statement</title>
-  <titlealts audience="PDF"><navtitle>ALTER VIEW</navtitle></titlealts>
+
+  <titlealts audience="PDF">
+
+    <navtitle>ALTER VIEW</navtitle>
+
+  </titlealts>
+
   <prolog>
     <metadata>
       <data name="Category" value="Impala"/>
@@ -38,67 +44,78 @@ under the License.
   <conbody>
 
     <p>
-      <indexterm audience="hidden">ALTER VIEW statement</indexterm>
-      Changes the characteristics of a view. The syntax has two forms:
+      The <codeph>ALTER VIEW</codeph> statement changes the characteristics of a view.
     </p>
 
-    <ul>
-      <li>
-        The <codeph>AS</codeph> clause associates the view with a different query.
-      </li>
-      <li>
-        The <codeph>RENAME TO</codeph> clause changes the name of the view, moves the view to
-        a different database, or both.
-      </li>
-    </ul>
-
     <p>
-      Because a view is purely a logical construct (an alias for a query) with no physical data behind it,
-      <codeph>ALTER VIEW</codeph> only involves changes to metadata in the metastore database, not any data files
-      in HDFS.
+      Because a view is a logical construct, an alias for a query, with no physical data behind
+      it, <codeph>ALTER VIEW</codeph> only involves changes to metadata in the metastore
+      database, not any data files in HDFS.
     </p>
 
-<!-- View _permissions_ don't rely on underlying table. -->
+    <p>
+      To see the definition of the updated view, issue a <codeph>DESCRIBE FORMATTED</codeph>
+      statement.
+    </p>
 
-<!-- Could use views to grant access only to certain columns. -->
+    <p conref="../shared/impala_common.xml#common/syntax_blurb"/>
 
-<!-- Treated like a table for authorization. -->
+<codeblock>ALTER VIEW [<varname>database_name</varname>.]<varname>view_name</varname>
+   [(<varname>column_name</varname> [COMMENT '<varname>column_comment</varname>'][, ...])]
+   AS <varname>select_statement</varname>;
 
-<!-- ALTER VIEW that queries another view - possibly a runtime error. -->
+ALTER VIEW [<varname>database_name</varname>.]<varname>view_name</varname>
+   RENAME TO [<varname>database_name</varname>.]<varname>view_name</varname>;</codeblock>
 
-    <p conref="../shared/impala_common.xml#common/syntax_blurb"/>
+    <ul>
+      <li>
+        The <codeph>AS</codeph> clause associates the view with a different query.
+        <p>
+          An optional list of column names can be specified with or without the column-level
+          comments.
+        </p>
+
+        <p>
+          For example:
+<codeblock>
+ALTER VIEW v1 AS SELECT x, UPPER(s) s FROM t2;
+ALTER VIEW v1 (c1, c2, c3) AS SELECT x, UPPER(s) s FROM t2;
+ALTER VIEW v7 (c1 COMMENT 'Comment for c1', c2) AS SELECT t1.c1, t1.c2 FROM t1;
+</codeblock>
+        </p>
+      </li>
 
-<codeblock>ALTER VIEW [<varname>database_name</varname>.]<varname>view_name</varname> AS <varname>select_statement</varname>
-ALTER VIEW [<varname>database_name</varname>.]<varname>view_name</varname> RENAME TO [<varname>database_name</varname>.]<varname>view_name</varname></codeblock>
+      <li>
+        The <codeph>RENAME TO</codeph> clause changes the name of the view, moves the view to a
+        different database, or both.
+        <p>
+          For example:
+<codeblock>ALTER VIEW db1.v1 RENAME TO db2.v2; -- Move the view to a different database with a new name.
+ALTER VIEW db1.v1 RENAME TO db1.v2;  -- Rename the view in the same database.
+ALTER VIEW db1.v1 RENAME TO db2.v1; -- Move the view to a different database with the same view name.</codeblock>
+        </p>
+      </li>
+    </ul>
 
     <p conref="../shared/impala_common.xml#common/ddl_blurb"/>
 
     <p conref="../shared/impala_common.xml#common/sync_ddl_blurb"/>
 
     <p conref="../shared/impala_common.xml#common/security_blurb"/>
+
     <p conref="../shared/impala_common.xml#common/redaction_yes"/>
 
     <p conref="../shared/impala_common.xml#common/cancel_blurb_no"/>
 
     <p conref="../shared/impala_common.xml#common/permissions_blurb_no"/>
 
-    <p conref="../shared/impala_common.xml#common/example_blurb"/>
-
-<codeblock>create table t1 (x int, y int, s string);
-create table t2 like t1;
-create view v1 as select * from t1;
-alter view v1 as select * from t2;
-alter view v1 as select x, upper(s) s from t2;</codeblock>
-
-<!-- Repeat the same blurb + example to see the definition of a view, as in CREATE VIEW. -->
-
-    <p conref="../shared/impala_common.xml#common/describe_formatted_view"/>
-
     <p conref="../shared/impala_common.xml#common/related_info"/>
 
     <p>
       <xref href="impala_views.xml#views"/>, <xref href="impala_create_view.xml#create_view"/>,
       <xref href="impala_drop_view.xml#drop_view"/>
     </p>
+
   </conbody>
+
 </concept>