Posted to commits@impala.apache.org by bo...@apache.org on 2019/02/12 17:30:55 UTC

[impala] 01/03: IMPALA-7128 (part 1) Refactor interfaces for Db, View, Table, Partition

This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch 2.x
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 55da35e9fe4d6ccc08662c0cf17961a987183646
Author: Todd Lipcon <to...@cloudera.com>
AuthorDate: Tue Jun 5 18:05:14 2018 -0700

    IMPALA-7128 (part 1) Refactor interfaces for Db, View, Table, Partition
    
    This refactors out interfaces in the frontend for the interaction
    between the analysis/planning code and the concrete classes that
    implement these catalog objects (Db, View, Table, Partition).
    
    This takes care of the most commonly used objects but defers others
    (e.g. functions, cache pools, and data sources) to follow-on patches.
    
    A few spots remaining in the frontend still downcast to the
    implementation classes, particularly where the frontend makes in-place
    modifications to catalog objects. I left TODOs in those spots and will
    come back to them later as necessary.
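    
    As a rough illustration of the pattern (a hypothetical, trimmed-down
    sketch, not the actual file contents -- the real FeTable in this patch
    declares many more methods; the names here are taken from call sites
    in the hunks below):
    
        import java.util.Arrays;
        import java.util.List;
        
        // Read-only frontend interface extracted from the concrete class.
        interface FeTable {
          String getName();
          String getFullName();
          List<String> getColumnNames();
        }
        
        // The catalogd-backed implementation keeps its mutable state but
        // now implements the interface, so analysis/planning code can
        // hold FeTable references instead of the concrete Table type.
        class Table implements FeTable {
          private final String db_;
          private final String name_;
          Table(String db, String name) { db_ = db; name_ = name; }
          @Override public String getName() { return name_; }
          @Override public String getFullName() { return db_ + "." + name_; }
          @Override public List<String> getColumnNames() {
            return Arrays.asList("id", "val");  // placeholder columns
          }
        }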
    
    Change-Id: Id55f7d2e94d81e66ce720acb6315f15a89621b31
    Reviewed-on: http://gerrit.cloudera.org:8080/10611
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
    Reviewed-on: http://gerrit.cloudera.org:8080/12345
    Reviewed-by: Fredy Wijaya <fw...@cloudera.com>
---
 .../AlterTableAddDropRangePartitionStmt.java       |   4 +-
 .../analysis/AlterTableAddPartitionStmt.java       |   4 +-
 .../analysis/AlterTableAddReplaceColsStmt.java     |   4 +-
 .../impala/analysis/AlterTableAlterColStmt.java    |   5 +-
 .../impala/analysis/AlterTableDropColStmt.java     |   5 +-
 .../analysis/AlterTableDropPartitionStmt.java      |   4 +-
 .../analysis/AlterTableOrViewRenameStmt.java       |   6 +-
 .../impala/analysis/AlterTableSetCachedStmt.java   |   9 +-
 .../analysis/AlterTableSetFileFormatStmt.java      |   4 +-
 .../impala/analysis/AlterTableSetLocationStmt.java |  15 +-
 .../analysis/AlterTableSetRowFormatStmt.java       |  11 +-
 .../apache/impala/analysis/AlterTableSetStmt.java  |   4 +-
 .../analysis/AlterTableSetTblProperties.java       |   8 +-
 .../impala/analysis/AlterTableSortByStmt.java      |   8 +-
 .../org/apache/impala/analysis/AlterTableStmt.java |   6 +-
 .../org/apache/impala/analysis/AlterViewStmt.java  |   8 +-
 .../apache/impala/analysis/AnalysisContext.java    |  10 +-
 .../java/org/apache/impala/analysis/Analyzer.java  |  62 ++++----
 .../org/apache/impala/analysis/BaseTableRef.java   |   4 +-
 .../apache/impala/analysis/ColumnLineageGraph.java |   5 +-
 .../apache/impala/analysis/ComputeStatsStmt.java   |  56 ++++----
 .../org/apache/impala/analysis/CreateDbStmt.java   |   4 +-
 .../impala/analysis/CreateFunctionStmtBase.java    |   4 +-
 .../impala/analysis/CreateTableAsSelectStmt.java   |  19 +--
 .../impala/analysis/CreateTableLikeStmt.java       |   4 +-
 .../apache/impala/analysis/DescribeTableStmt.java  |   7 +-
 .../apache/impala/analysis/DescriptorTable.java    |  26 ++--
 .../org/apache/impala/analysis/DropDbStmt.java     |   4 +-
 .../apache/impala/analysis/DropFunctionStmt.java   |   4 +-
 .../impala/analysis/DropTableOrViewStmt.java       |  10 +-
 .../apache/impala/analysis/FunctionCallExpr.java   |   3 +-
 .../org/apache/impala/analysis/InlineViewRef.java  |   6 +-
 .../org/apache/impala/analysis/InsertStmt.java     |  13 +-
 .../apache/impala/analysis/IsNullPredicate.java    |   4 +-
 .../org/apache/impala/analysis/LoadDataStmt.java   |   8 +-
 .../org/apache/impala/analysis/ModifyStmt.java     |   4 +-
 .../org/apache/impala/analysis/PartitionDef.java   |  10 +-
 .../org/apache/impala/analysis/PartitionSet.java   |  12 +-
 .../apache/impala/analysis/PartitionSpecBase.java  |   4 +-
 .../main/java/org/apache/impala/analysis/Path.java |  10 +-
 .../org/apache/impala/analysis/PrivilegeSpec.java  |  12 +-
 .../org/apache/impala/analysis/SelectStmt.java     |   4 +-
 .../impala/analysis/ShowCreateFunctionStmt.java    |   4 +-
 .../impala/analysis/ShowCreateTableStmt.java       |  10 +-
 .../org/apache/impala/analysis/ShowFilesStmt.java  |   8 +-
 .../org/apache/impala/analysis/ShowStatsStmt.java  |  12 +-
 .../java/org/apache/impala/analysis/SlotRef.java   |   4 +-
 .../apache/impala/analysis/StmtMetadataLoader.java |  34 ++---
 .../java/org/apache/impala/analysis/TableDef.java  |   8 +-
 .../java/org/apache/impala/analysis/TableRef.java  |   4 +-
 .../org/apache/impala/analysis/ToSqlUtils.java     |   9 +-
 .../org/apache/impala/analysis/TruncateStmt.java   |   8 +-
 .../apache/impala/analysis/TupleDescriptor.java    |   8 +-
 .../org/apache/impala/analysis/WithClause.java     |   5 +-
 .../java/org/apache/impala/catalog/Catalog.java    |   1 -
 fe/src/main/java/org/apache/impala/catalog/Db.java |  27 ++--
 .../java/org/apache/impala/catalog/FeCatalog.java  | 119 +++++++++++++++
 .../main/java/org/apache/impala/catalog/FeDb.java  | 100 +++++++++++++
 .../org/apache/impala/catalog/FeFsPartition.java   | 155 ++++++++++++++++++++
 .../java/org/apache/impala/catalog/FeFsTable.java  | 160 +++++++++++++++++++++
 .../java/org/apache/impala/catalog/FeTable.java    | 131 +++++++++++++++++
 .../java/org/apache/impala/catalog/FeView.java     |  45 ++++++
 .../org/apache/impala/catalog/HdfsPartition.java   |  62 ++++----
 .../java/org/apache/impala/catalog/HdfsTable.java  | 105 +++++++-------
 .../org/apache/impala/catalog/ImpaladCatalog.java  |  35 ++---
 .../main/java/org/apache/impala/catalog/Table.java |  52 +++----
 .../main/java/org/apache/impala/catalog/View.java  |  14 +-
 .../org/apache/impala/planner/HBaseTableSink.java  |   4 +-
 .../apache/impala/planner/HdfsPartitionFilter.java |  13 +-
 .../apache/impala/planner/HdfsPartitionPruner.java |  24 ++--
 .../org/apache/impala/planner/HdfsScanNode.java    |  25 ++--
 .../org/apache/impala/planner/HdfsTableSink.java   |   3 +-
 .../java/org/apache/impala/planner/JoinNode.java   |   4 +-
 .../org/apache/impala/planner/KuduTableSink.java   |   4 +-
 .../java/org/apache/impala/planner/Planner.java    |   4 +-
 .../impala/planner/RuntimeFilterGenerator.java     |   4 +-
 .../java/org/apache/impala/planner/ScanNode.java   |   4 +-
 .../apache/impala/planner/SingleNodePlanner.java   |  15 +-
 .../java/org/apache/impala/planner/TableSink.java  |   8 +-
 .../apache/impala/service/CatalogOpExecutor.java   |  31 ++--
 .../java/org/apache/impala/service/Frontend.java   |  16 ++-
 .../java/org/apache/impala/service/JniCatalog.java |   4 +-
 .../org/apache/impala/service/JniFrontend.java     |   7 +-
 .../java/org/apache/impala/service/MetadataOp.java |   4 +-
 .../apache/impala/util/AvroSchemaConverter.java    |   1 -
 .../org/apache/impala/util/HdfsCachingUtil.java    |   3 +-
 .../apache/impala/analysis/AuthorizationTest.java  |   8 +-
 .../impala/analysis/StmtMetadataLoaderTest.java    |   3 +-
 .../catalog/CatalogObjectToFromThriftTest.java     |   6 +-
 .../org/apache/impala/catalog/CatalogTest.java     |   4 +-
 .../apache/impala/testutil/BlockIdGenerator.java   |  12 +-
 .../apache/impala/testutil/ImpaladTestCatalog.java |   5 +-
 92 files changed, 1233 insertions(+), 517 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
index aee07f7..6c62df5 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
@@ -20,8 +20,8 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableAddDropRangePartitionParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -87,7 +87,7 @@ public class AlterTableAddDropRangePartitionStmt extends AlterTableStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table table = getTargetTable();
+    FeTable table = getTargetTable();
     if (!(table instanceof KuduTable)) {
       throw new AnalysisException(String.format("Table %s does not support range " +
           "partitions: RANGE %s", table.getFullName(), rangePartitionSpec_.toSql()));
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
index 59fdf2b..9c1a035 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
@@ -21,8 +21,8 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Sets;
 
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.service.CatalogOpExecutor;
 import org.apache.impala.thrift.TAlterTableAddPartitionParams;
@@ -83,7 +83,7 @@ public class AlterTableAddPartitionStmt extends AlterTableStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table table = getTargetTable();
+    FeTable table = getTargetTable();
     if (table instanceof KuduTable) {
       throw new AnalysisException("ALTER TABLE ADD PARTITION is not supported for " +
           "Kudu tables: " + table.getTableName());
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
index 598b47f..d00c0cd 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
@@ -23,9 +23,9 @@ import java.util.Set;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -72,7 +72,7 @@ public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table t = getTargetTable();
+    FeTable t = getTargetTable();
     // TODO: Support column-level DDL on HBase tables. Requires updating the column
     // mappings along with the table columns.
     if (t instanceof HBaseTable) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
index fdfe3ab..e6b0d49 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
@@ -19,12 +19,11 @@ package org.apache.impala.analysis;
 
 import java.util.Map;
 
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.KuduColumn;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableAlterColParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -101,7 +100,7 @@ public class AlterTableAlterColStmt extends AlterTableStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table t = getTargetTable();
+    FeTable t = getTargetTable();
     if (t instanceof HBaseTable) {
       throw new AnalysisException(
           "ALTER TABLE CHANGE/ALTER COLUMN not currently supported on HBase tables.");
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
index 3753dbe..94fbc8d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
@@ -18,9 +18,8 @@
 package org.apache.impala.analysis;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableDropColParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -55,7 +54,7 @@ public class AlterTableDropColStmt extends AlterTableStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table t = getTargetTable();
+    FeTable t = getTargetTable();
     // TODO: Support column-level DDL on HBase tables. Requires updating the column
     // mappings along with the table columns.
     if (t instanceof HBaseTable) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
index e183e80..048b985 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
@@ -18,8 +18,8 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableDropPartitionParams;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -74,7 +74,7 @@ public class AlterTableDropPartitionStmt extends AlterTableStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table table = getTargetTable();
+    FeTable table = getTargetTable();
     if (table instanceof KuduTable) {
       throw new AnalysisException("ALTER TABLE DROP PARTITION is not supported for " +
           "Kudu tables: " + partitionSet_.toSql());
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
index 650c77c..4cf2ff3 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
@@ -18,7 +18,7 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.View;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAccessEvent;
 import org.apache.impala.thrift.TAlterTableOrViewRenameParams;
@@ -71,11 +71,11 @@ public class AlterTableOrViewRenameStmt extends AlterTableStmt {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     newTableName_.analyze();
     table_ = analyzer.getTable(tableName_, Privilege.ALTER);
-    if (table_ instanceof View && renameTable_) {
+    if (table_ instanceof FeView && renameTable_) {
       throw new AnalysisException(String.format(
           "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
     }
-    if (!(table_ instanceof View) && !renameTable_) {
+    if (!(table_ instanceof FeView) && !renameTable_) {
       throw new AnalysisException(String.format(
           "ALTER VIEW not allowed on a table: %s", table_.getFullName()));
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
index 931c6a9..0ffca1e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
@@ -20,8 +20,9 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsPartition;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetCachedParams;
@@ -60,7 +61,7 @@ public class AlterTableSetCachedStmt extends AlterTableSetStmt {
     super.analyze(analyzer);
     cacheOp_.analyze(analyzer);
 
-    Table table = getTargetTable();
+    FeTable table = getTargetTable();
     Preconditions.checkNotNull(table);
     if (!(table instanceof HdfsTable)) {
       throw new AnalysisException("ALTER TABLE SET [CACHED|UNCACHED] must target an " +
@@ -73,9 +74,9 @@ public class AlterTableSetCachedStmt extends AlterTableSetStmt {
       HdfsTable hdfsTable = (HdfsTable)table;
       StringBuilder nameSb = new StringBuilder();
       if (partitionSet != null) {
-        List<HdfsPartition> parts = partitionSet.getPartitions();
+        List<FeFsPartition> parts = partitionSet.getPartitions();
         nameSb.append("Partition(s) (");
-        for(HdfsPartition part: parts) {
+        for(FeFsPartition part: parts) {
           isCacheable = isCacheable && part.isCacheable();
           if(!part.isCacheable()) nameSb.append(part.getPartitionName());
         }
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
index 36db614..31c046a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
@@ -17,7 +17,7 @@
 
 package org.apache.impala.analysis;
 
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -55,7 +55,7 @@ public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table tbl = getTargetTable();
+    FeTable tbl = getTargetTable();
     if (tbl instanceof KuduTable) {
       throw new AnalysisException("ALTER TABLE SET FILEFORMAT is not supported " +
           "on Kudu tables: " + tbl.getFullName());
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
index ae31a76..524f076 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -22,9 +22,10 @@ import java.util.List;
 
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -74,23 +75,23 @@ public class AlterTableSetLocationStmt extends AlterTableSetStmt {
     super.analyze(analyzer);
     location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
 
-    Table table = getTargetTable();
+    FeTable table = getTargetTable();
     Preconditions.checkNotNull(table);
     if (table instanceof HdfsTable) {
       HdfsTable hdfsTable = (HdfsTable) table;
       if (getPartitionSet() != null) {
         // Targeting a partition rather than a table.
-        List<HdfsPartition> partitions = getPartitionSet().getPartitions();
+        List<FeFsPartition> partitions = getPartitionSet().getPartitions();
         if (partitions.isEmpty()) { return; }
         if (partitions.size() != 1) {
           // Sort the partitions to get a consistent error reporting.
-          List<HdfsPartition> sortedPartitions = Lists.newArrayList(partitions);
-          Collections.sort(sortedPartitions);
+          List<FeFsPartition> sortedPartitions = Lists.newArrayList(partitions);
+          Collections.sort(sortedPartitions, HdfsPartition.KV_COMPARATOR);
           List<String> sortedPartitionNames =
               Lists.transform(sortedPartitions.subList(0, NUM_PARTITION_LOG_LIMIT),
-                  new Function<HdfsPartition, String>() {
+                  new Function<FeFsPartition, String>() {
                     @Override
-                    public String apply(HdfsPartition hdfsPartition) {
+                    public String apply(FeFsPartition hdfsPartition) {
                       return hdfsPartition.getPartitionName();
                     }
                   });
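
The explicit comparator passed to Collections.sort() above is needed because
the FeFsPartition interface, unlike the concrete HdfsPartition, is not
assumed to be Comparable. A hypothetical sketch of the same move (the real
HdfsPartition.KV_COMPARATOR compares partition key values; ordering by
partition name here is just a stand-in):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    interface FeFsPartition { String getPartitionName(); }

    class ComparatorSketch {
      // Stand-in for HdfsPartition.KV_COMPARATOR.
      static final Comparator<FeFsPartition> BY_NAME =
          Comparator.comparing(FeFsPartition::getPartitionName);

      static List<FeFsPartition> sorted(List<FeFsPartition> parts) {
        List<FeFsPartition> copy = new ArrayList<>(parts);
        Collections.sort(copy, BY_NAME);  // explicit comparator, not Comparable
        return copy;
      }
    }
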
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
index 52f7a32..cdc71b3 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
@@ -17,14 +17,11 @@
 
 package org.apache.impala.analysis;
 
-import java.util.Collection;
-
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.RowFormat;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetRowFormatParams;
@@ -60,13 +57,13 @@ public class AlterTableSetRowFormatStmt extends AlterTableSetStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table tbl = getTargetTable();
+    FeTable tbl = getTargetTable();
     if (!(tbl instanceof HdfsTable)) {
       throw new AnalysisException(String.format("ALTER TABLE SET ROW FORMAT is only " +
           "supported on HDFS tables. Conflicting table: %1$s", tbl.getFullName()));
     }
     if (partitionSet_ != null) {
-      for (HdfsPartition partition: partitionSet_.getPartitions()) {
+      for (FeFsPartition partition: partitionSet_.getPartitions()) {
         if (partition.getFileFormat() != HdfsFileFormat.TEXT &&
             partition.getFileFormat() != HdfsFileFormat.SEQUENCE_FILE) {
           throw new AnalysisException(String.format("ALTER TABLE SET ROW FORMAT is " +
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
index 178d28b..948a7aa 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
@@ -18,8 +18,8 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 
 /**
@@ -39,7 +39,7 @@ public class AlterTableSetStmt extends AlterTableStmt {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     super.analyze(analyzer);
-    Table t = getTargetTable();
+    FeTable t = getTargetTable();
     // TODO: Support ALTER TABLE SET on HBase tables. Requires validating changes
     // to the SERDEPROPERTIES and TBLPROPERTIES to ensure the table metadata does not
     // become invalid.
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
index 732f5f6..26c5ac0 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
 import org.apache.impala.authorization.PrivilegeRequestBuilder;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduTable;
@@ -177,10 +179,10 @@ public class AlterTableSetTblProperties extends AlterTableSetStmt {
    * null, then the method ensures that 'skip.header.line.count' is supported for its
    * table type. If it is null, then this check is omitted.
    */
-  public static void analyzeSkipHeaderLineCount(Table table,
+  public static void analyzeSkipHeaderLineCount(FeTable table,
       Map<String, String> tblProperties) throws AnalysisException {
     if (tblProperties.containsKey(HdfsTable.TBL_PROP_SKIP_HEADER_LINE_COUNT)) {
-      if (table != null && !(table instanceof HdfsTable)) {
+      if (table != null && !(table instanceof FeFsTable)) {
         throw new AnalysisException(String.format("Table property " +
             "'skip.header.line.count' is only supported for HDFS tables."));
       }
@@ -198,7 +200,7 @@ public class AlterTableSetTblProperties extends AlterTableSetStmt {
    * Returns a list of positions of the sort columns within the table's list of
    * columns.
    */
-  public static List<Integer> analyzeSortColumns(Table table,
+  public static List<Integer> analyzeSortColumns(FeTable table,
       Map<String, String> tblProperties) throws AnalysisException {
     if (!tblProperties.containsKey(
         AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS)) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java
index 1bf6658..2b54b74 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSortByStmt.java
@@ -17,15 +17,12 @@
 
 package org.apache.impala.analysis;
 
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetTblPropertiesParams;
@@ -34,7 +31,6 @@ import org.apache.impala.thrift.TTablePropertyType;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Maps;
 
 /**
@@ -71,7 +67,7 @@ public class AlterTableSortByStmt extends AlterTableStmt {
     super.analyze(analyzer);
 
     // Disallow setting sort columns on HBase and Kudu tables.
-    Table targetTable = getTargetTable();
+    FeTable targetTable = getTargetTable();
     if (targetTable instanceof HBaseTable) {
       throw new AnalysisException("ALTER TABLE SORT BY not supported on HBase tables.");
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
index 75abaa7..eda260f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -21,7 +21,7 @@ import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.DataSourceTable;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TTableName;
@@ -35,7 +35,7 @@ public abstract class AlterTableStmt extends StatementBase {
   protected final TableName tableName_;
 
   // Set during analysis.
-  protected Table table_;
+  protected FeTable table_;
 
   protected AlterTableStmt(TableName tableName) {
     Preconditions.checkState(tableName != null && !tableName.isEmpty());
@@ -57,7 +57,7 @@ public abstract class AlterTableStmt extends StatementBase {
    * Can only be called after analysis, returns the Table object of the target of this
    * ALTER TABLE statement.
    */
-  protected Table getTargetTable() {
+  protected FeTable getTargetTable() {
     Preconditions.checkNotNull(table_);
     return table_;
   }
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
index 6740f14..a6b6548 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
@@ -18,8 +18,8 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Table;
-import org.apache.impala.catalog.View;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.RuntimeEnv;
 import org.apache.impala.service.BackendConfig;
@@ -45,9 +45,9 @@ public class AlterViewStmt extends CreateOrAlterViewStmtBase {
     dbName_ = analyzer.getTargetDbName(tableName_);
     owner_ = analyzer.getUser().getName();
 
-    Table table = analyzer.getTable(tableName_, Privilege.ALTER);
+    FeTable table = analyzer.getTable(tableName_, Privilege.ALTER);
     Preconditions.checkNotNull(table);
-    if (!(table instanceof View)) {
+    if (!(table instanceof FeView)) {
       throw new AnalysisException(String.format(
           "ALTER VIEW not allowed on a table: %s.%s", dbName_, getTbl()));
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
index 3e7f0cc..aa49c03 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
@@ -30,8 +30,8 @@ import org.apache.impala.authorization.AuthorizeableTable;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequest;
 import org.apache.impala.catalog.AuthorizationException;
-import org.apache.impala.catalog.Db;
-import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.catalog.FeCatalog;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.ImpalaException;
@@ -62,7 +62,7 @@ public class AnalysisContext {
   private final EventSequence timeline_;
 
   // Set in analyzeAndAuthorize().
-  private ImpaladCatalog catalog_;
+  private FeCatalog catalog_;
   private AnalysisResult analysisResult_;
 
   // Use Hive's scheme for auto-generating column labels. Only used for testing.
@@ -75,7 +75,7 @@ public class AnalysisContext {
     timeline_ = timeline;
   }
 
-  public ImpaladCatalog getCatalog() { return catalog_; }
+  public FeCatalog getCatalog() { return catalog_; }
   public TQueryCtx getQueryCtx() { return queryCtx_; }
   public TQueryOptions getQueryOptions() {
     return queryCtx_.client_request.query_options;
@@ -621,7 +621,7 @@ public class AnalysisContext {
    */
   private boolean checkSystemDbAccess(String dbName, Privilege privilege)
       throws AuthorizationException {
-    Db db = catalog_.getDb(dbName);
+    FeDb db = catalog_.getDb(dbName);
     if (db != null && db.isSystemDb()) {
       switch (privilege) {
         case VIEW_METADATA:
diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index f0a4b84..f0f7334 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -39,16 +39,16 @@ import org.apache.impala.authorization.User;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.DataSourceTable;
 import org.apache.impala.catalog.DatabaseNotFoundException;
-import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeCatalog;
+import org.apache.impala.catalog.FeDb;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.ImpaladCatalog;
 import org.apache.impala.catalog.IncompleteTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.catalog.Type;
-import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.IdGenerator;
 import org.apache.impala.common.ImpalaException;
@@ -349,7 +349,7 @@ public class Analyzer {
   private final ArrayList<Analyzer> ancestors_;
 
   // map from lowercase table alias to a view definition in this analyzer's scope
-  private final Map<String, View> localViews_ = Maps.newHashMap();
+  private final Map<String, FeView> localViews_ = Maps.newHashMap();
 
   // Map from lowercase table alias to descriptor. Tables without an explicit alias
   // are assigned two implicit aliases: the unqualified and fully-qualified table name.
@@ -469,9 +469,9 @@ public class Analyzer {
    * definition with the same alias has already been registered or if the number of
    * explicit column labels is greater than the number of columns in the view statement.
    */
-  public void registerLocalView(View view) throws AnalysisException {
+  public void registerLocalView(FeView view) throws AnalysisException {
     Preconditions.checkState(view.isLocalView());
-    if (view.hasColLabels()) {
+    if (view.getColLabels() != null) {
       List<String> viewLabels = view.getColLabels();
       List<String> queryStmtLabels = view.getQueryStmt().getColLabels();
       if (viewLabels.size() > queryStmtLabels.size()) {
@@ -551,7 +551,7 @@ public class Analyzer {
       String viewAlias = tableRef.getPath().get(0).toLowerCase();
       Analyzer analyzer = this;
       do {
-        View localView = analyzer.localViews_.get(viewAlias);
+        FeView localView = analyzer.localViews_.get(viewAlias);
         if (localView != null) return new InlineViewRef(localView, tableRef);
         analyzer = (analyzer.ancestors_.isEmpty() ? null : analyzer.ancestors_.get(0));
       } while (analyzer != null);
@@ -583,10 +583,10 @@ public class Analyzer {
 
     Preconditions.checkNotNull(resolvedPath);
     if (resolvedPath.destTable() != null) {
-      Table table = resolvedPath.destTable();
-      if (table instanceof View) return new InlineViewRef((View) table, tableRef);
+      FeTable table = resolvedPath.destTable();
+      if (table instanceof FeView) return new InlineViewRef((FeView) table, tableRef);
       // The table must be a base table.
-      Preconditions.checkState(table instanceof HdfsTable ||
+      Preconditions.checkState(table instanceof FeFsTable ||
           table instanceof KuduTable ||
           table instanceof HBaseTable ||
           table instanceof DataSourceTable);
@@ -770,7 +770,7 @@ public class Analyzer {
       List<TableName> candidateTbls = Path.getCandidateTables(rawPath, getDefaultDb());
       for (int tblNameIdx = 0; tblNameIdx < candidateTbls.size(); ++tblNameIdx) {
         TableName tblName = candidateTbls.get(tblNameIdx);
-        Table tbl = null;
+        FeTable tbl = null;
         try {
           tbl = getTable(tblName.getDb(), tblName.getTbl());
         } catch (AnalysisException e) {
@@ -1303,7 +1303,7 @@ public class Analyzer {
   }
 
   public DescriptorTable getDescTbl() { return globalState_.descTbl; }
-  public ImpaladCatalog getCatalog() { return globalState_.stmtTableCache.catalog; }
+  public FeCatalog getCatalog() { return globalState_.stmtTableCache.catalog; }
   public StmtTableCache getStmtTableCache() { return globalState_.stmtTableCache; }
   public Set<String> getAliases() { return aliasMap_.keySet(); }
 
@@ -2354,10 +2354,10 @@ public class Analyzer {
    * Throws a TableLoadingException if the registered table failed to load.
    * Does not register authorization requests or access events.
    */
-  public Table getTable(String dbName, String tableName)
+  public FeTable getTable(String dbName, String tableName)
       throws AnalysisException, TableLoadingException {
     TableName tblName = new TableName(dbName, tableName);
-    Table table = globalState_.stmtTableCache.tables.get(tblName);
+    FeTable table = globalState_.stmtTableCache.tables.get(tblName);
     if (table == null) {
       if (!globalState_.stmtTableCache.dbs.contains(tblName.getDb())) {
         throw new AnalysisException(DB_DOES_NOT_EXIST_ERROR_MSG + tblName.getDb());
@@ -2384,8 +2384,8 @@ public class Analyzer {
    * regardless of the state of the table (i.e. whether it exists, is loaded, etc.).
    * If addAccessEvent is true adds an access event for successfully loaded tables.
    */
-  public Table getTable(TableName tableName, Privilege privilege, boolean addAccessEvent)
-      throws AnalysisException, TableLoadingException {
+  public FeTable getTable(TableName tableName, Privilege privilege,
+      boolean addAccessEvent) throws AnalysisException, TableLoadingException {
     Preconditions.checkNotNull(tableName);
     Preconditions.checkNotNull(privilege);
     tableName = getFqTableName(tableName);
@@ -2396,12 +2396,12 @@ public class Analyzer {
       registerPrivReq(new PrivilegeRequestBuilder()
           .allOf(privilege).onTable(tableName.getDb(), tableName.getTbl()).toRequest());
     }
-    Table table = getTable(tableName.getDb(), tableName.getTbl());
+    FeTable table = getTable(tableName.getDb(), tableName.getTbl());
     Preconditions.checkNotNull(table);
     if (addAccessEvent) {
       // Add an audit event for this access
       TCatalogObjectType objectType = TCatalogObjectType.TABLE;
-      if (table instanceof View) objectType = TCatalogObjectType.VIEW;
+      if (table instanceof FeView) objectType = TCatalogObjectType.VIEW;
       globalState_.accessEvents.add(new TAccessEvent(
           tableName.toString(), objectType, privilege.toString()));
     }
@@ -2416,7 +2416,7 @@ public class Analyzer {
    * AuthorizationException is thrown.
    * If the table or the db does not exist in the Catalog, an AnalysisError is thrown.
    */
-  public Table getTable(TableName tableName, Privilege privilege)
+  public FeTable getTable(TableName tableName, Privilege privilege)
       throws AnalysisException {
     try {
       return getTable(tableName, privilege, true);
@@ -2434,11 +2434,11 @@ public class Analyzer {
    *
    * If the database does not exist in the catalog an AnalysisError is thrown.
    */
-  public Db getDb(String dbName, Privilege privilege) throws AnalysisException {
+  public FeDb getDb(String dbName, Privilege privilege) throws AnalysisException {
     return getDb(dbName, privilege, true);
   }
 
-  public Db getDb(String dbName, Privilege privilege, boolean throwIfDoesNotExist)
+  public FeDb getDb(String dbName, Privilege privilege, boolean throwIfDoesNotExist)
       throws AnalysisException {
     PrivilegeRequestBuilder pb = new PrivilegeRequestBuilder();
     if (privilege == Privilege.ANY) {
@@ -2448,7 +2448,7 @@ public class Analyzer {
       registerPrivReq(pb.allOf(privilege).onDb(dbName).toRequest());
     }
 
-    Db db = getDb(dbName, throwIfDoesNotExist);
+    FeDb db = getDb(dbName, throwIfDoesNotExist);
     globalState_.accessEvents.add(new TAccessEvent(
         dbName, TCatalogObjectType.DATABASE, privilege.toString()));
     return db;
@@ -2457,9 +2457,9 @@ public class Analyzer {
   /**
    * Returns a Catalog Db object without checking for privileges.
    */
-  public Db getDb(String dbName, boolean throwIfDoesNotExist)
+  public FeDb getDb(String dbName, boolean throwIfDoesNotExist)
       throws AnalysisException {
-    Db db = getCatalog().getDb(dbName);
+    FeDb db = getCatalog().getDb(dbName);
     if (db == null && throwIfDoesNotExist) {
       throw new AnalysisException(DB_DOES_NOT_EXIST_ERROR_MSG + dbName);
     }
@@ -2479,7 +2479,7 @@ public class Analyzer {
     registerPrivReq(new PrivilegeRequestBuilder().allOf(privilege)
         .onTable(dbName,  tableName).toRequest());
     try {
-      Db db = getCatalog().getDb(dbName);
+      FeDb db = getCatalog().getDb(dbName);
       if (db == null) {
         throw new DatabaseNotFoundException("Database not found: " + dbName);
       }
@@ -2543,7 +2543,7 @@ public class Analyzer {
         && g.hasEdge(a.asInt(), b.asInt()));
   }
 
-  public Map<String, View> getLocalViews() { return localViews_; }
+  public Map<String, FeView> getLocalViews() { return localViews_; }
 
   /**
    * Add a warning that will be displayed to the user. Ignores null messages. Once
@@ -2575,10 +2575,10 @@ public class Analyzer {
    * for the given table and privilege. The table must be a base table or a
    * catalog view (not a local view).
    */
-  public void registerAuthAndAuditEvent(Table table, Privilege priv) {
+  public void registerAuthAndAuditEvent(FeTable table, Privilege priv) {
     // Add access event for auditing.
-    if (table instanceof View) {
-      View view = (View) table;
+    if (table instanceof FeView) {
+      FeView view = (FeView) table;
       Preconditions.checkState(!view.isLocalView());
       addAccessEvent(new TAccessEvent(
           table.getFullName(), TCatalogObjectType.VIEW,
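
The Analyzer hunks above show the dispatch side of the refactor: analysis
code now holds FeTable references and uses instanceof against the
sub-interfaces. A hypothetical, minimal sketch (assuming FeView extends
FeTable, as the casts above suggest; IllegalArgumentException stands in
for Impala's AnalysisException):

    interface FeTable { String getFullName(); }
    interface FeView extends FeTable { }

    class DispatchSketch {
      // Mirrors checks like the one in AlterTableOrViewRenameStmt.
      static void requireNotAView(FeTable t) {
        if (t instanceof FeView) {
          throw new IllegalArgumentException(
              "ALTER TABLE not allowed on a view: " + t.getFullName());
        }
      }
    }
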
diff --git a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
index 4f8ccda..3fbc612 100644
--- a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
@@ -17,8 +17,8 @@
 
 package org.apache.impala.analysis;
 
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 
@@ -89,7 +89,7 @@ public class BaseTableRef extends TableRef {
    * Analyze the 'skip.header.line.count' property.
    */
   private void analyzeSkipHeaderLineCount() throws AnalysisException {
-    Table table = getTable();
+    FeTable table = getTable();
     if (!(table instanceof HdfsTable)) return;
     HdfsTable hdfsTable = (HdfsTable)table;
 
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java b/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
index e7c66e2..7df6e29 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
@@ -32,8 +32,7 @@ import org.json.simple.parser.JSONParser;
 import org.json.simple.parser.ParseException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.Id;
 import org.apache.impala.common.IdGenerator;
 import org.apache.impala.thrift.TEdgeType;
@@ -688,7 +687,7 @@ public class ColumnLineageGraph {
     targetColumnLabels_.addAll(columnLabels);
   }
 
-  public void addTargetColumnLabels(Table dstTable) {
+  public void addTargetColumnLabels(FeTable dstTable) {
     Preconditions.checkNotNull(dstTable);
     String tblFullName = dstTable.getFullName();
     for (String columnName: dstTable.getColumnNames()) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index f20e1c7..dec3f45 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -27,12 +27,14 @@ import java.util.Set;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.PrintUtils;
@@ -106,7 +108,7 @@ public class ComputeStatsStmt extends StatementBase {
   protected final TableSampleClause sampleParams_;
 
   // Set during analysis.
-  protected Table table_;
+  protected FeTable table_;
 
   // Effective sampling percent based on the total number of bytes in the files sample.
   // Set to -1 for non-HDFS tables or if TABLESAMPLE was not specified.
@@ -342,7 +344,7 @@ public class ComputeStatsStmt extends StatementBase {
     }
     table_ = analyzer.getTable(tableName_, Privilege.ALTER);
 
-    if (!(table_ instanceof HdfsTable)) {
+    if (!(table_ instanceof FeFsTable)) {
       if (partitionSet_ != null) {
         throw new AnalysisException("COMPUTE INCREMENTAL ... PARTITION not supported " +
             "for non-HDFS table " + tableName_);
@@ -358,7 +360,7 @@ public class ComputeStatsStmt extends StatementBase {
           throw new AnalysisException(colName + " not found in table: " +
               table_.getName());
         }
-        if (table_ instanceof HdfsTable && table_.isClusteringColumn(col)) {
+        if (table_ instanceof FeFsTable && table_.isClusteringColumn(col)) {
           throw new AnalysisException("COMPUTE STATS not supported for partitioning " +
               "column " + col.getName() + " of HDFS table.");
         }
@@ -370,9 +372,9 @@ public class ComputeStatsStmt extends StatementBase {
       }
     }
 
-    HdfsTable hdfsTable = null;
-    if (table_ instanceof HdfsTable) {
-      hdfsTable = (HdfsTable)table_;
+    FeFsTable hdfsTable = null;
+    if (table_ instanceof FeFsTable) {
+      hdfsTable = (FeFsTable)table_;
       if (hdfsTable.isAvroTable()) checkIncompleteAvroSchema(hdfsTable);
       if (isIncremental_ && hdfsTable.getNumClusteringCols() == 0 &&
           partitionSet_ != null) {
@@ -435,7 +437,9 @@ public class ComputeStatsStmt extends StatementBase {
               " does not have statistics, recomputing stats for the whole table");
         }
 
-        for (HdfsPartition p: hdfsTable.getPartitions()) {
+        Collection<? extends FeFsPartition> allPartitions =
+            hdfsTable.getPartitions();
+        for (FeFsPartition p: allPartitions) {
           if (p.isDefaultPartition()) continue;
           TPartitionStats partStats = p.getPartitionStats();
           if (!p.hasIncrementalStats() || tableIsMissingColStats) {
@@ -460,7 +464,7 @@ public class ComputeStatsStmt extends StatementBase {
         }
       } else {
         // Always compute stats on a set of partitions when told to.
-        for (HdfsPartition targetPartition: partitionSet_.getPartitions()) {
+        for (FeFsPartition targetPartition: partitionSet_.getPartitions()) {
           filterPreds.add(targetPartition.getConjunctSql());
           List<String> partValues = Lists.newArrayList();
           for (LiteralExpr partValue: targetPartition.getPartitionValues()) {
@@ -470,9 +474,11 @@ public class ComputeStatsStmt extends StatementBase {
           expectedPartitions_.add(partValues);
         }
         // Create a hash set out of partitionSet_ for O(1) lookups.
-        HashSet<HdfsPartition> targetPartitions =
+        // TODO(todd) avoid loading all partitions.
+        HashSet<FeFsPartition> targetPartitions =
             Sets.newHashSet(partitionSet_.getPartitions());
-        for (HdfsPartition p : hdfsTable.getPartitions()) {
+        Collection<? extends FeFsPartition> allPartitions = hdfsTable.getPartitions();
+        for (FeFsPartition p : allPartitions) {
           if (p.isDefaultPartition()) continue;
           if (targetPartitions.contains(p)) continue;
           TPartitionStats partStats = p.getPartitionStats();
@@ -583,10 +589,10 @@ public class ComputeStatsStmt extends StatementBase {
    */
   private String analyzeTableSampleClause(Analyzer analyzer) throws AnalysisException {
     if (sampleParams_ == null) return "";
-    if (!(table_ instanceof HdfsTable)) {
+    if (!(table_ instanceof FeFsTable)) {
       throw new AnalysisException("TABLESAMPLE is only supported on HDFS tables.");
     }
-    HdfsTable hdfsTable = (HdfsTable) table_;
+    FeFsTable hdfsTable = (FeFsTable) table_;
     if (!hdfsTable.isStatsExtrapolationEnabled()) {
       throw new AnalysisException(String.format(
           "COMPUTE STATS TABLESAMPLE requires stats extrapolation which is disabled.\n" +
@@ -606,8 +612,9 @@ public class ComputeStatsStmt extends StatementBase {
     // Compute the sample of files and set 'sampleFileBytes_'.
     long minSampleBytes = analyzer.getQueryOptions().compute_stats_min_sample_size;
     long samplePerc = sampleParams_.getPercentBytes();
+    Collection<? extends FeFsPartition> partitions = hdfsTable.getPartitions();
     Map<Long, List<FileDescriptor>> sample = hdfsTable.getFilesSample(
-        hdfsTable.getPartitions(), samplePerc, minSampleBytes, sampleSeed);
+        partitions, samplePerc, minSampleBytes, sampleSeed);
     long sampleFileBytes = 0;
     for (List<FileDescriptor> fds: sample.values()) {
       for (FileDescriptor fd: fds) sampleFileBytes += fd.getFileLength();
@@ -643,7 +650,7 @@ public class ComputeStatsStmt extends StatementBase {
    * AnalysisException for such ill-created Avro tables. Does nothing if
    * the column definitions match the Avro schema exactly.
    */
-  private void checkIncompleteAvroSchema(HdfsTable table) throws AnalysisException {
+  private void checkIncompleteAvroSchema(FeFsTable table) throws AnalysisException {
     Preconditions.checkState(table.isAvroTable());
     org.apache.hadoop.hive.metastore.api.Table msTable = table.getMetaStoreTable();
     // The column definitions from 'CREATE TABLE (column definitions) ...'
@@ -708,8 +715,8 @@ public class ComputeStatsStmt extends StatementBase {
    * the partition level.
    */
   private boolean updateTableStatsOnly() {
-    if (!(table_ instanceof HdfsTable)) return true;
-    return !isIncremental_ && ((HdfsTable) table_).isStatsExtrapolationEnabled();
+    if (!(table_ instanceof FeFsTable)) return true;
+    return !isIncremental_ && ((FeFsTable) table_).isStatsExtrapolationEnabled();
   }
 
   /**
@@ -747,14 +754,15 @@ public class ComputeStatsStmt extends StatementBase {
    * false otherwise.
    */
   public boolean isColumnar() {
-    if (!(table_ instanceof HdfsTable)) return false;
-    Collection<HdfsPartition> affectedPartitions = null;
+    if (!(table_ instanceof FeFsTable)) return false;
+    Collection<? extends FeFsPartition> affectedPartitions = null;
     if (partitionSet_ != null) {
       affectedPartitions = partitionSet_.getPartitions();
     } else {
-      affectedPartitions = ((HdfsTable) table_).getPartitions();
+      FeFsTable hdfsTable = (FeFsTable)table_;
+      affectedPartitions = hdfsTable.getPartitions();
     }
-    for (HdfsPartition partition: affectedPartitions) {
+    for (FeFsPartition partition: affectedPartitions) {
       if (partition.getFileFormat() != HdfsFileFormat.PARQUET
           && partition.getFileFormat() != HdfsFileFormat.ORC)
         return false;
@@ -794,10 +802,10 @@ public class ComputeStatsStmt extends StatementBase {
     params.setExpect_all_partitions(expectAllPartitions_);
     if (!expectAllPartitions_) params.setExpected_partitions(expectedPartitions_);
     if (isIncremental_) {
-      params.setNum_partition_cols(((HdfsTable)table_).getNumClusteringCols());
+      params.setNum_partition_cols(((FeFsTable)table_).getNumClusteringCols());
     }
-    if (table_ instanceof HdfsTable) {
-      params.setTotal_file_bytes(((HdfsTable)table_).getTotalHdfsBytes());
+    if (table_ instanceof FeFsTable) {
+      params.setTotal_file_bytes(((FeFsTable)table_).getTotalHdfsBytes());
     }
     return params;
   }
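
The Collection<? extends FeFsPartition> signatures above are forced by Java
generics: a Collection<HdfsPartition> is not a Collection<FeFsPartition>,
even when HdfsPartition implements FeFsPartition. A hypothetical sketch of
why the wildcard return type works:

    import java.util.ArrayList;
    import java.util.Collection;

    interface FeFsPartition { boolean isCacheable(); }

    class HdfsPartition implements FeFsPartition {
      @Override public boolean isCacheable() { return true; }
    }

    class WildcardSketch {
      // The wildcard lets an implementation hand back its concrete
      // partition collection without copying.
      static Collection<? extends FeFsPartition> getPartitions() {
        Collection<HdfsPartition> parts = new ArrayList<>();
        parts.add(new HdfsPartition());
        return parts;  // widens to Collection<? extends FeFsPartition>
      }

      static void visitAll() {
        // Callers still get read-only access through the interface.
        for (FeFsPartition p : getPartitions()) p.isCacheable();
      }
    }
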
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
index f71f269..6b25940 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
@@ -20,7 +20,7 @@ package org.apache.impala.analysis;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.thrift.TCreateDbParams;
@@ -92,7 +92,7 @@ public class CreateDbStmt extends StatementBase {
     // this Impala instance. If that happens, the caller will not get an
     // AnalysisException when creating the database, they will get a Hive
     // AlreadyExistsException once the request has been sent to the metastore.
-    Db db = analyzer.getDb(getDb(), Privilege.CREATE, false);
+    FeDb db = analyzer.getDb(getDb(), Privilege.CREATE, false);
     if (db != null && !ifNotExists_) {
       throw new AnalysisException(Analyzer.DB_ALREADY_EXISTS_ERROR_MSG + getDb());
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java b/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
index cd86e93..10c0907 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
@@ -26,7 +26,7 @@ import org.apache.impala.authorization.AuthorizeableFn;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequest;
 import org.apache.impala.catalog.ImpaladCatalog;
-import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
@@ -65,7 +65,7 @@ public abstract class CreateFunctionStmtBase extends StatementBase {
   protected Function fn_;
 
   // Db object for function fn_. Set in analyze().
-  protected Db db_;
+  protected FeDb db_;
 
   // Set in analyze()
   protected String sqlString_;
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
index 5c8d939..3e03ef4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
@@ -23,11 +23,12 @@ import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.rewrite.ExprRewriter;
@@ -188,7 +189,7 @@ public class CreateTableAsSelectStmt extends StatementBase {
 
     // The full privilege check for the database will be done as part of the INSERT
     // analysis.
-    Db db = analyzer.getDb(createStmt_.getDb(), Privilege.ANY);
+    FeDb db = analyzer.getDb(createStmt_.getDb(), Privilege.ANY);
     if (db == null) {
       throw new AnalysisException(
           Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + createStmt_.getDb());
@@ -202,21 +203,23 @@ public class CreateTableAsSelectStmt extends StatementBase {
     org.apache.hadoop.hive.metastore.api.Table msTbl =
         CatalogOpExecutor.createMetaStoreTable(createStmt_.toThrift());
 
-    try (MetaStoreClient client = analyzer.getCatalog().getMetaStoreClient()) {
+    try {
       // Set a valid location of this table using the same rules as the metastore. If the
       // user specified a location for the table this will be a no-op.
       msTbl.getSd().setLocation(analyzer.getCatalog().getTablePath(msTbl).toString());
 
-      Table tmpTable = null;
+      FeTable tmpTable = null;
       if (KuduTable.isKuduTable(msTbl)) {
-        tmpTable = KuduTable.createCtasTarget(db, msTbl, createStmt_.getColumnDefs(),
+        // TODO(todd): avoid downcast to 'Db' here
+        tmpTable = KuduTable.createCtasTarget((Db)db, msTbl, createStmt_.getColumnDefs(),
             createStmt_.getPrimaryKeyColumnDefs(),
             createStmt_.getKuduPartitionParams());
       } else if (HdfsFileFormat.isHdfsInputFormatClass(msTbl.getSd().getInputFormat())) {
-        tmpTable = HdfsTable.createCtasTarget(db, msTbl);
+        // TODO(todd): avoid downcast to 'Db' here
+        tmpTable = HdfsTable.createCtasTarget((Db)db, msTbl);
       }
       Preconditions.checkState(tmpTable != null &&
-          (tmpTable instanceof HdfsTable || tmpTable instanceof KuduTable));
+          (tmpTable instanceof FeFsTable || tmpTable instanceof KuduTable));
 
       insertStmt_.setTargetTable(tmpTable);
     } catch (Exception e) {
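
The CTAS analysis above now builds its transient target table behind the
FeTable interface, but the createCtasTarget() factories still take the
concrete catalogd-backed Db, hence the casts flagged by the TODO(todd)
comments. The MetaStoreClient handle is also no longer opened in this
method; presumably the catalog resolves the default table path itself. A
simplified sketch of the flow (names as in the hunk; error handling and
the Kudu branch elided):

    FeDb db = analyzer.getDb(createStmt_.getDb(), Privilege.ANY);
    org.apache.hadoop.hive.metastore.api.Table msTbl =
        CatalogOpExecutor.createMetaStoreTable(createStmt_.toThrift());
    // No-op if the user specified a location explicitly:
    msTbl.getSd().setLocation(analyzer.getCatalog().getTablePath(msTbl).toString());
    // Downcast to Db until the factory is widened to accept FeDb:
    FeTable tmpTable = HdfsTable.createCtasTarget((Db) db, msTbl);
    insertStmt_.setTargetTable(tmpTable);
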
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
index 041ab9e..e0d0630 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
@@ -21,8 +21,8 @@ import java.util.List;
 
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAccessEvent;
 import org.apache.impala.thrift.TCatalogObjectType;
@@ -159,7 +159,7 @@ public class CreateTableLikeStmt extends StatementBase {
     }
 
     // Make sure the source table exists and the user has permission to access it.
-    Table srcTable = analyzer.getTable(srcTableName_, Privilege.VIEW_METADATA);
+    FeTable srcTable = analyzer.getTable(srcTableName_, Privilege.VIEW_METADATA);
     if (KuduTable.isKuduTable(srcTable.getMetaStoreTable())) {
       throw new AnalysisException("Cloning a Kudu table using CREATE TABLE LIKE is " +
           "not supported.");
diff --git a/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
index 9de5054..1e70468 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
@@ -23,10 +23,9 @@ import java.util.List;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.impala.analysis.Path.PathType;
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.authorization.PrivilegeRequest;
 import org.apache.impala.authorization.PrivilegeRequestBuilder;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TDescribeOutputStyle;
@@ -60,7 +59,7 @@ public class DescribeTableStmt extends StatementBase {
   private Path path_;
 
   /// The fully qualified name of the root table, set after analysis.
-  private Table table_;
+  private FeTable table_;
 
   /// Struct type with the fields to display for the described path.
   /// Only set when describing a path to a nested collection.
@@ -84,7 +83,7 @@ public class DescribeTableStmt extends StatementBase {
     return sb.toString() + StringUtils.join(rawPath_, ".");
   }
 
-  public Table getTable() { return table_; }
+  public FeTable getTable() { return table_; }
   public TDescribeOutputStyle getOutputStyle() { return outputStyle_; }
 
   @Override
diff --git a/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java b/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
index 7728d94..d8e369c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
@@ -24,11 +24,11 @@ import java.util.HashSet;
 import java.util.List;
 
 import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.StructField;
 import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
-import org.apache.impala.catalog.View;
 import org.apache.impala.common.IdGenerator;
 import org.apache.impala.thrift.TColumnType;
 import org.apache.impala.thrift.TDescriptorTable;
@@ -50,9 +50,9 @@ public class DescriptorTable {
   // The target table of a table sink, may be null.
   // Table id 0 is reserved for it. Set in QueryStmt.analyze() that produces a table sink,
   // e.g. InsertStmt.analyze(), ModifyStmt.analyze().
-  private Table targetTable_;
+  private FeTable targetTable_;
   // For each table, the set of partitions that are referenced by at least one scan range.
-  private final HashMap<Table, HashSet<Long>> referencedPartitionsPerTable_ =
+  private final HashMap<FeTable, HashSet<Long>> referencedPartitionsPerTable_ =
       Maps.newHashMap();
   // 0 is reserved for table sinks
   public static final int TABLE_SINK_ID = 0;
@@ -104,13 +104,13 @@ public class DescriptorTable {
   public Collection<SlotDescriptor> getSlotDescs() { return slotDescs_.values(); }
   public SlotId getMaxSlotId() { return slotIdGenerator_.getMaxId(); }
 
-  public void setTargetTable(Table table) { targetTable_ = table; }
+  public void setTargetTable(FeTable table) { targetTable_ = table; }
 
   /**
    * Find the set of referenced partitions for the given table.  Allocates a set if
    * none has been allocated for the table yet.
    */
-  private HashSet<Long> getReferencedPartitions(Table table) {
+  private HashSet<Long> getReferencedPartitions(FeTable table) {
     HashSet<Long> refPartitions = referencedPartitionsPerTable_.get(table);
     if (refPartitions == null) {
       refPartitions = new HashSet<Long>();
@@ -123,7 +123,7 @@ public class DescriptorTable {
    * Add the partition with ID partitionId to the set of referenced partitions for the
    * given table.
    */
-  public void addReferencedPartition(Table table, long partitionId) {
+  public void addReferencedPartition(FeTable table, long partitionId) {
     getReferencedPartitions(table).add(Long.valueOf(partitionId));
   }
 
@@ -152,9 +152,9 @@ public class DescriptorTable {
   public TDescriptorTable toThrift() {
     TDescriptorTable result = new TDescriptorTable();
     // Maps from base table to its table id used in the backend.
-    HashMap<Table, Integer> tableIdMap = Maps.newHashMap();
+    HashMap<FeTable, Integer> tableIdMap = Maps.newHashMap();
     // Used to check table level consistency
-    HashMap<TableName, Table> referencedTables = Maps.newHashMap();
+    HashMap<TableName, FeTable> referencedTables = Maps.newHashMap();
 
     if (targetTable_ != null) {
       tableIdMap.put(targetTable_, TABLE_SINK_ID);
@@ -164,13 +164,13 @@ public class DescriptorTable {
       // inline view of a non-constant select has a non-materialized tuple descriptor
       // in the descriptor table just for type checking, which we need to skip
       if (!tupleDesc.isMaterialized()) continue;
-      Table table = tupleDesc.getTable();
+      FeTable table = tupleDesc.getTable();
       Integer tableId = tableIdMap.get(table);
-      if (table != null && !(table instanceof View)) {
+      if (table != null && !(table instanceof FeView)) {
         TableName tblName = table.getTableName();
         // Verify table level consistency in the same query by checking that references to
         // the same Table refer to the same table instance.
-        Table checkTable = referencedTables.get(tblName);
+        FeTable checkTable = referencedTables.get(tblName);
         Preconditions.checkState(checkTable == null || table == checkTable);
         if (tableId == null) {
           tableId = nextTableId_++;
@@ -188,7 +188,7 @@ public class DescriptorTable {
         result.addToSlotDescriptors(slotD.toThrift());
       }
     }
-    for (Table tbl: tableIdMap.keySet()) {
+    for (FeTable tbl: tableIdMap.keySet()) {
       HashSet<Long> referencedPartitions = null; // null means include all partitions.
       // We don't know which partitions are needed for INSERT, so do not prune partitions.
       if (tbl != targetTable_) referencedPartitions = getReferencedPartitions(tbl);
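
DescriptorTable now keys its partition-pruning map by FeTable, and
toThrift() verifies that all references to one table name resolve to the
same FeTable instance within a query. A usage sketch (assumed call
pattern; analyzer.getDescTbl() is the usual accessor in this codebase):

    DescriptorTable descTbl = analyzer.getDescTbl();
    // Scan planning registers each partition touched by a scan range so
    // toThrift() serializes only those partitions:
    descTbl.addReferencedPartition(scanTable, 3L);
    descTbl.addReferencedPartition(scanTable, 7L);
    // INSERT targets are exempt from pruning, as the hunk above notes:
    descTbl.setTargetTable(targetTable);
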
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
index e832878..40b41aa 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
@@ -18,7 +18,7 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TDropDbParams;
 
@@ -64,7 +64,7 @@ public class DropDbStmt extends StatementBase {
 
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
-    Db db = analyzer.getDb(dbName_, Privilege.DROP, false);
+    FeDb db = analyzer.getDb(dbName_, Privilege.DROP, false);
     if (db == null && !ifExists_) {
       throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + dbName_);
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
index 2784aaf..03275b5 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
@@ -22,7 +22,7 @@ import java.util.ArrayList;
 import org.apache.impala.authorization.AuthorizeableFn;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequest;
-import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
@@ -88,7 +88,7 @@ public class DropFunctionStmt extends StatementBase {
     analyzer.registerPrivReq(new PrivilegeRequest(
         new AuthorizeableFn(desc_.dbName(), desc_.signatureString()), Privilege.DROP));
 
-    Db db =  analyzer.getDb(desc_.dbName(), false);
+    FeDb db =  analyzer.getDb(desc_.dbName(), false);
     if (db == null && !ifExists_) {
       throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + desc_.dbName());
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
index 72769d0..3e43a01 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
@@ -20,9 +20,9 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.TableLoadingException;
-import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAccessEvent;
 import org.apache.impala.thrift.TCatalogObjectType;
@@ -96,13 +96,13 @@ public class DropTableOrViewStmt extends StatementBase {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     dbName_ = analyzer.getTargetDbName(tableName_);
     try {
-      Table table = analyzer.getTable(tableName_, Privilege.DROP, true);
+      FeTable table = analyzer.getTable(tableName_, Privilege.DROP, true);
       Preconditions.checkNotNull(table);
-      if (table instanceof View && dropTable_) {
+      if (table instanceof FeView && dropTable_) {
         throw new AnalysisException(String.format(
             "DROP TABLE not allowed on a view: %s.%s", dbName_, getTbl()));
       }
-      if (!(table instanceof View) && !dropTable_) {
+      if (!(table instanceof FeView) && !dropTable_) {
         throw new AnalysisException(String.format(
             "DROP VIEW not allowed on a table: %s.%s", dbName_, getTbl()));
       }
diff --git a/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java b/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
index 321cfbd..d8d44ec 100644
--- a/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
@@ -23,6 +23,7 @@ import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.AggregateFunction;
 import org.apache.impala.catalog.Catalog;
 import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.ImpaladCatalog;
 import org.apache.impala.catalog.ScalarFunction;
@@ -474,7 +475,7 @@ public class FunctionCallExpr extends Expr {
     }
 
     // User needs DB access.
-    Db db = analyzer.getDb(fnName_.getDb(), Privilege.VIEW_METADATA, true);
+    FeDb db = analyzer.getDb(fnName_.getDb(), Privilege.VIEW_METADATA, true);
     if (!db.containsFunction(fnName_.getFunction())) {
       throw new AnalysisException(fnName_ + "() unknown");
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
index df6779d..b1996bb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
@@ -22,9 +22,9 @@ import java.util.HashSet;
 import java.util.List;
 
 import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.StructField;
 import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.rewrite.ExprRewriter;
 import org.slf4j.Logger;
@@ -43,7 +43,7 @@ public class InlineViewRef extends TableRef {
 
   // Catalog or local view that is referenced.
   // Null for inline views parsed directly from a query string.
-  private final View view_;
+  private final FeView view_;
 
   // If not null, these will serve as the column labels for the inline view. This provides
   // a layer of separation between column labels visible from outside the inline view
@@ -93,7 +93,7 @@ public class InlineViewRef extends TableRef {
   /**
    * C'tor for creating inline views that replace a local or catalog view ref.
    */
-  public InlineViewRef(View view, TableRef origTblRef) {
+  public InlineViewRef(FeView view, TableRef origTblRef) {
     super(view.getTableName().toPath(), origTblRef.getExplicitAlias(),
         origTblRef.getPrivilege());
     queryStmt_ = view.getQueryStmt().clone();
diff --git a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
index 7aa9e8e..493f14b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
@@ -25,11 +25,12 @@ import java.util.Set;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequestBuilder;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduColumn;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
@@ -114,7 +115,7 @@ public class InsertStmt extends StatementBase {
   private QueryStmt queryStmt_;
 
   // Set in analyze(). Contains metadata of target table to determine type of sink.
-  private Table table_;
+  private FeTable table_;
 
   // Set in analyze(). Exprs correspond to the partitionKeyValues, if specified, or to
   // the partition columns for Kudu tables.
@@ -423,7 +424,7 @@ public class InsertStmt extends StatementBase {
     }
 
     // We do not support (in|up)serting into views.
-    if (table_ instanceof View) {
+    if (table_ instanceof FeView) {
       throw new AnalysisException(
           String.format("Impala does not support %sing into views: %s", getOpName(),
               table_.getFullName()));
@@ -653,7 +654,7 @@ public class InsertStmt extends StatementBase {
    *           If an expression is not compatible with its target column
    */
   private void prepareExpressions(List<Column> selectExprTargetColumns,
-      List<Expr> selectListExprs, Table tbl, Analyzer analyzer)
+      List<Expr> selectListExprs, FeTable tbl, Analyzer analyzer)
       throws AnalysisException {
     // Temporary lists of partition key exprs and names in an arbitrary order.
     List<Expr> tmpPartitionKeyExprs = new ArrayList<Expr>();
@@ -843,8 +844,8 @@ public class InsertStmt extends StatementBase {
 
   public List<PlanHint> getPlanHints() { return planHints_; }
   public TableName getTargetTableName() { return targetTableName_; }
-  public Table getTargetTable() { return table_; }
-  public void setTargetTable(Table table) { this.table_ = table; }
+  public FeTable getTargetTable() { return table_; }
+  public void setTargetTable(FeTable table) { this.table_ = table; }
   public boolean isOverwrite() { return overwrite_; }
 
   /**
diff --git a/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java b/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java
index 2629be2..c5bbd91 100644
--- a/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java
@@ -18,11 +18,11 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Function.CompareMode;
 import org.apache.impala.catalog.ScalarFunction;
 import org.apache.impala.catalog.ScalarType;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.Reference;
@@ -142,7 +142,7 @@ public class IsNullPredicate extends Predicate {
     if (isSingleColumnPredicate(slotRefRef, null)) {
       SlotDescriptor slotDesc = slotRefRef.getRef().getDesc();
       if (!slotDesc.getStats().hasNulls()) return;
-      Table table = slotDesc.getParent().getTable();
+      FeTable table = slotDesc.getParent().getTable();
       if (table != null && table.getNumRows() > 0) {
         long numRows = table.getNumRows();
         if (isNotNull_) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
index ddc0c6e..3116eb4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
@@ -29,10 +29,10 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.thrift.ImpalaInternalServiceConstants;
@@ -107,7 +107,7 @@ public class LoadDataStmt extends StatementBase {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     dbName_ = analyzer.getTargetDbName(tableName_);
-    Table table = analyzer.getTable(tableName_, Privilege.INSERT);
+    FeTable table = analyzer.getTable(tableName_, Privilege.INSERT);
     if (!(table instanceof HdfsTable)) {
       throw new AnalysisException("LOAD DATA only supported for HDFS tables: " +
           dbName_ + "." + getTbl());
@@ -205,7 +205,7 @@ public class LoadDataStmt extends StatementBase {
           "target table (%s) because Impala does not have WRITE access to HDFS " +
           "location: ", hdfsTable.getFullName());
 
-      HdfsPartition partition;
+      FeFsPartition partition;
       String location;
       if (partitionSpec_ != null) {
         partition = hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
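
LOAD DATA is one of the places that still narrows to the concrete
HdfsTable (note the HdfsTable import is kept), while the partition handle
it reads becomes the FeFsPartition interface. A sketch of the resulting
shape (getPartition() signature assumed from the call site):

    FeTable table = analyzer.getTable(tableName_, Privilege.INSERT);
    if (!(table instanceof HdfsTable)) {
      throw new AnalysisException("LOAD DATA only supported for HDFS tables");
    }
    HdfsTable hdfsTable = (HdfsTable) table;
    // Reads go through the interface even though the owner is concrete:
    FeFsPartition partition =
        hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
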
diff --git a/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java b/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
index c73109e..ec24685 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
@@ -26,8 +26,8 @@ import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.Pair;
@@ -148,7 +148,7 @@ public abstract class ModifyStmt extends StatementBase {
     }
 
     Preconditions.checkNotNull(targetTableRef_);
-    Table dstTbl = targetTableRef_.getTable();
+    FeTable dstTbl = targetTableRef_.getTable();
     // Only Kudu tables can be updated
     if (!(dstTbl instanceof KuduTable)) {
       throw new AnalysisException(
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionDef.java b/fe/src/main/java/org/apache/impala/analysis/PartitionDef.java
index b5443cb..c8e0a4f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionDef.java
@@ -21,8 +21,8 @@ import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
@@ -81,7 +81,7 @@ public class PartitionDef implements ParseNode {
       location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
     }
 
-    Table table;
+    FeTable table;
     try {
       table = analyzer.getTable(partitionSpec_.getTableName(), Privilege.ALTER,
           false);
@@ -89,8 +89,8 @@ public class PartitionDef implements ParseNode {
       throw new AnalysisException(e.getMessage(), e);
     }
 
-    Preconditions.checkState(table instanceof HdfsTable);
-    HdfsTable hdfsTable = (HdfsTable)table;
+    Preconditions.checkState(table instanceof FeFsTable);
+    FeFsTable hdfsTable = (FeFsTable)table;
 
     boolean shouldCache;
     if (cacheOp_ != null) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
index 3ff4ed7..f04504c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
@@ -22,8 +22,8 @@ import java.util.Set;
 
 import org.apache.impala.analysis.BinaryPredicate.Operator;
 import org.apache.impala.catalog.Column;
-import org.apache.impala.catalog.HdfsPartition;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.Reference;
@@ -44,13 +44,13 @@ public class PartitionSet extends PartitionSpecBase {
   private final List<Expr> partitionExprs_;
 
   // Result of analysis
-  private List<HdfsPartition> partitions_ = Lists.newArrayList();
+  private List<FeFsPartition> partitions_ = Lists.newArrayList();
 
   public PartitionSet(List<Expr> partitionExprs) {
     this.partitionExprs_ = ImmutableList.copyOf(partitionExprs);
   }
 
-  public List<HdfsPartition> getPartitions() { return partitions_; }
+  public List<FeFsPartition> getPartitions() { return partitions_; }
 
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
@@ -107,7 +107,7 @@ public class PartitionSet extends PartitionSpecBase {
   // specified partition specs add IF EXISTS by setting partitionShouldExists_ to null.
   // The given conjuncts are assumed to only reference partition columns.
   private void addIfExists(
-      Analyzer analyzer, Table table, List<Expr> conjuncts) {
+      Analyzer analyzer, FeTable table, List<Expr> conjuncts) {
     boolean add = false;
     Set<String> partColNames = Sets.newHashSet();
     Reference<SlotRef> slotRef = new Reference<>();
@@ -173,7 +173,7 @@ public class PartitionSet extends PartitionSpecBase {
 
   public List<List<TPartitionKeyValue>> toThrift() {
     List<List<TPartitionKeyValue>> thriftPartitionSet = Lists.newArrayList();
-    for (HdfsPartition hdfsPartition : partitions_) {
+    for (FeFsPartition hdfsPartition : partitions_) {
       List<TPartitionKeyValue> thriftPartitionSpec = Lists.newArrayList();
       for (int i = 0; i < table_.getNumClusteringCols(); ++i) {
         String key = table_.getColumns().get(i).getName();
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
index 1a7c1a1..4d432e2 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
@@ -18,8 +18,8 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
@@ -72,7 +72,7 @@ public abstract class PartitionSpecBase implements ParseNode {
 
     // Skip adding an audit event when analyzing partitions. The parent table should
     // be audited outside of the PartitionSpec.
-    Table table;
+    FeTable table;
     try {
       table = analyzer.getTable(tableName_, privilegeRequirement_, false);
     } catch (TableLoadingException e) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/Path.java b/fe/src/main/java/org/apache/impala/analysis/Path.java
index 15b57b4..cfcd67e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Path.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Path.java
@@ -22,10 +22,10 @@ import java.util.List;
 
 import org.apache.impala.catalog.ArrayType;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.MapType;
 import org.apache.impala.catalog.StructField;
 import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 
 import com.google.common.base.Joiner;
@@ -129,7 +129,7 @@ public class Path {
   // Catalog table that this resolved path is rooted at, if any.
   // Null if the path is rooted at a registered tuple that does not
   // belong to a catalog table/view.
-  private final Table rootTable_;
+  private final FeTable rootTable_;
 
   // Root path that a relative path was created from.
   private final Path rootPath_;
@@ -165,7 +165,7 @@ public class Path {
   /**
    * Constructs a Path rooted at the given rootTable.
    */
-  public Path(Table rootTable, List<String> rawPath) {
+  public Path(FeTable rootTable, List<String> rawPath) {
     Preconditions.checkNotNull(rootTable);
     Preconditions.checkNotNull(rawPath);
     rootTable_ = rootTable;
@@ -293,7 +293,7 @@ public class Path {
     return result;
   }
 
-  public Table getRootTable() { return rootTable_; }
+  public FeTable getRootTable() { return rootTable_; }
   public TupleDescriptor getRootDesc() { return rootDesc_; }
   public boolean isRootedAtTable() { return rootTable_ != null; }
   public boolean isRootedAtTuple() { return rootDesc_ != null; }
@@ -336,7 +336,7 @@ public class Path {
     return null;
   }
 
-  public Table destTable() {
+  public FeTable destTable() {
     Preconditions.checkState(isResolved_);
     if (rootTable_ != null && rootDesc_ == null && matchedTypes_.isEmpty()) {
       return rootTable_;
diff --git a/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java b/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java
index d848383..610ffa9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java
@@ -21,10 +21,10 @@ import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.RolePrivilege;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.TableLoadingException;
-import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TPrivilege;
 import org.apache.impala.thrift.TPrivilegeLevel;
@@ -237,8 +237,8 @@ public class PrivilegeSpec implements ParseNode {
       throw new AnalysisException("Only 'SELECT' privileges are allowed " +
           "in a column privilege spec.");
     }
-    Table table = analyzeTargetTable(analyzer);
-    if (table instanceof View) {
+    FeTable table = analyzeTargetTable(analyzer);
+    if (table instanceof FeView) {
       throw new AnalysisException("Column-level privileges on views are not " +
           "supported.");
     }
@@ -266,14 +266,14 @@ public class PrivilegeSpec implements ParseNode {
    * 3. Table does not exist.
    * 4. The privilege level is not supported on tables, e.g. CREATE.
    */
-  private Table analyzeTargetTable(Analyzer analyzer) throws AnalysisException {
+  private FeTable analyzeTargetTable(Analyzer analyzer) throws AnalysisException {
     Preconditions.checkState(scope_ == TPrivilegeScope.TABLE ||
         scope_ == TPrivilegeScope.COLUMN);
     Preconditions.checkState(!Strings.isNullOrEmpty(tableName_.getTbl()));
     if (privilegeLevel_ == TPrivilegeLevel.CREATE) {
       throw new AnalysisException("Create-level privileges on tables are not supported.");
     }
-    Table table = null;
+    FeTable table = null;
     try {
       dbName_ = analyzer.getTargetDbName(tableName_);
       Preconditions.checkNotNull(dbName_);
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
index 8629663..d7ec04f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
@@ -24,9 +24,9 @@ import java.util.Set;
 
 import org.apache.impala.analysis.Path.PathType;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.StructField;
 import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.ColumnAliasGenerator;
@@ -473,7 +473,7 @@ public class SelectStmt extends QueryStmt {
       // The resolved path targets a registered tuple descriptor of a catalog
       // table. Expand the '*' based on the Hive-column order.
       TupleDescriptor tupleDesc = resolvedPath.destTupleDesc();
-      Table table = tupleDesc.getTable();
+      FeTable table = tupleDesc.getTable();
       for (Column c: table.getColumnsInHiveOrder()) {
         addStarResultExpr(resolvedPath, analyzer, c.getName());
       }
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java
index 414bbd7..9dc6052 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java
@@ -20,7 +20,7 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TFunctionCategory;
@@ -56,7 +56,7 @@ public class ShowCreateFunctionStmt extends StatementBase {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     functionName_.analyze(analyzer);
-    Db db = analyzer.getDb(functionName_.getDb(), Privilege.VIEW_METADATA);
+    FeDb db = analyzer.getDb(functionName_.getDb(), Privilege.VIEW_METADATA);
     List<Function> functions = db.getFunctions(category_, functionName_.getFunction());
     if (functions.isEmpty()) {
       throw new AnalysisException("Function " + functionName_.getFunction() + "() " +
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java
index 216562b..be9c2de 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java
@@ -20,8 +20,8 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Table;
-import org.apache.impala.catalog.View;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TCatalogObjectType;
 import org.apache.impala.thrift.TTableName;
@@ -60,9 +60,9 @@ public class ShowCreateTableStmt extends StatementBase {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     tableName_ = analyzer.getFqTableName(tableName_);
-    Table table = analyzer.getTable(tableName_, Privilege.VIEW_METADATA);
-    if (table instanceof View) {
-      View view = (View)table;
+    FeTable table = analyzer.getTable(tableName_, Privilege.VIEW_METADATA);
+    if (table instanceof FeView) {
+      FeView view = (FeView)table;
       // Analyze the view query statement with its own analyzer for authorization.
       Analyzer viewAnalyzer = new Analyzer(analyzer);
       // Only show the view's query if the user has permissions to execute the query, to
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
index ee749d0..62eceb0 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
@@ -20,8 +20,8 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TShowFilesParams;
 import org.apache.impala.thrift.TTableName;
@@ -42,7 +42,7 @@ public class ShowFilesStmt extends StatementBase {
   private final PartitionSet partitionSet_;
 
   // Set during analysis.
-  protected Table table_;
+  protected FeTable table_;
 
   public ShowFilesStmt(TableName tableName, PartitionSet partitionSet) {
     tableName_ = Preconditions.checkNotNull(tableName);
@@ -75,7 +75,7 @@ public class ShowFilesStmt extends StatementBase {
     }
     table_ = tableRef.getTable();
     Preconditions.checkNotNull(table_);
-    if (!(table_ instanceof HdfsTable)) {
+    if (!(table_ instanceof FeFsTable)) {
       throw new AnalysisException(String.format(
           "SHOW FILES not applicable to a non hdfs table: %s", tableName_));
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
index 91dfe75..5a7b3bc 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
@@ -20,10 +20,10 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
-import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TShowStatsOp;
 import org.apache.impala.thrift.TShowStatsParams;
@@ -39,7 +39,7 @@ public class ShowStatsStmt extends StatementBase {
   protected final TableName tableName_;
 
   // Set during analysis.
-  protected Table table_;
+  protected FeTable table_;
 
   public ShowStatsStmt(TableName tableName, TShowStatsOp op) {
     op_ = Preconditions.checkNotNull(op);
@@ -75,11 +75,11 @@ public class ShowStatsStmt extends StatementBase {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     table_ = analyzer.getTable(tableName_, Privilege.VIEW_METADATA);
     Preconditions.checkNotNull(table_);
-    if (table_ instanceof View) {
+    if (table_ instanceof FeView) {
       throw new AnalysisException(String.format(
           "%s not applicable to a view: %s", getSqlPrefix(), table_.getFullName()));
     }
-    if (table_ instanceof HdfsTable) {
+    if (table_ instanceof FeFsTable) {
       if (table_.getNumClusteringCols() == 0 && op_ == TShowStatsOp.PARTITIONS) {
         throw new AnalysisException("Table is not partitioned: " + table_.getFullName());
       }
diff --git a/fe/src/main/java/org/apache/impala/analysis/SlotRef.java b/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
index 0a945bd..ee0fbf6 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
@@ -22,7 +22,7 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.impala.analysis.Path.PathType;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
@@ -114,7 +114,7 @@ public class SlotRef extends Expr {
     }
 
     numDistinctValues_ = desc_.getStats().getNumDistinctValues();
-    Table rootTable = resolvedPath.getRootTable();
+    FeTable rootTable = resolvedPath.getRootTable();
     if (rootTable != null && rootTable.getNumRows() > 0) {
       // The NDV cannot exceed the #rows in the table.
       numDistinctValues_ = Math.min(numDistinctValues_, rootTable.getNumRows());
diff --git a/fe/src/main/java/org/apache/impala/analysis/StmtMetadataLoader.java b/fe/src/main/java/org/apache/impala/analysis/StmtMetadataLoader.java
index be71161..1fc8a44 100644
--- a/fe/src/main/java/org/apache/impala/analysis/StmtMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/analysis/StmtMetadataLoader.java
@@ -21,10 +21,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.impala.catalog.Db;
-import org.apache.impala.catalog.ImpaladCatalog;
-import org.apache.impala.catalog.Table;
-import org.apache.impala.catalog.View;
+import org.apache.impala.catalog.FeCatalog;
+import org.apache.impala.catalog.FeDb;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.service.Frontend;
 import org.apache.impala.util.EventSequence;
@@ -55,7 +55,7 @@ public class StmtMetadataLoader {
 
   // Results of the loading process. See StmtTableCache.
   private final Set<String> dbs_ = Sets.newHashSet();
-  private final Map<TableName, Table> loadedTbls_ = Maps.newHashMap();
+  private final Map<TableName, FeTable> loadedTbls_ = Maps.newHashMap();
 
   // Metrics for the metadata load.
   // Number of prioritizedLoad() RPCs issued to the catalogd.
@@ -71,12 +71,12 @@ public class StmtMetadataLoader {
    * in the Catalog at the time this StmtTableCache was generated.
    */
   public static final class StmtTableCache {
-    public final ImpaladCatalog catalog;
+    public final FeCatalog catalog;
     public final Set<String> dbs;
-    public final Map<TableName, Table> tables;
+    public final Map<TableName, FeTable> tables;
 
-    public StmtTableCache(ImpaladCatalog catalog, Set<String> dbs,
-        Map<TableName, Table> tables) {
+    public StmtTableCache(FeCatalog catalog, Set<String> dbs,
+        Map<TableName, FeTable> tables) {
       this.catalog = Preconditions.checkNotNull(catalog);
       this.dbs = Preconditions.checkNotNull(dbs);
       this.tables = Preconditions.checkNotNull(tables);
@@ -136,7 +136,7 @@ public class StmtMetadataLoader {
     Preconditions.checkState(dbs_.isEmpty() && loadedTbls_.isEmpty());
     Preconditions.checkState(numLoadRequestsSent_ == 0);
     Preconditions.checkState(numCatalogUpdatesReceived_ == 0);
-    ImpaladCatalog catalog = fe_.getCatalog();
+    FeCatalog catalog = fe_.getCatalog();
     Set<TableName> missingTbls = getMissingTables(catalog, tbls);
     // There are no missing tables. Return to avoid making an RPC to the CatalogServer
     // and adding events to the timeline.
@@ -172,7 +172,9 @@ public class StmtMetadataLoader {
       }
 
       // Catalog may have been restarted, always use the latest reference.
-      ImpaladCatalog currCatalog = fe_.getCatalog();
+      // TODO(todd) is this necessary in the case of the LocalCatalog impl?
+      // or maybe the whole loadTables() function is unnecessarily complex in that case?
+      FeCatalog currCatalog = fe_.getCatalog();
       boolean hasCatalogRestarted = currCatalog != catalog;
       if (hasCatalogRestarted && LOG.isWarnEnabled()) {
         LOG.warn(String.format(
@@ -237,23 +239,23 @@ public class StmtMetadataLoader {
    * Path.getCandidateTables(). Non-existent tables are ignored and not returned or
    * added to 'loadedTbls_'.
    */
-  private Set<TableName> getMissingTables(ImpaladCatalog catalog, Set<TableName> tbls) {
+  private Set<TableName> getMissingTables(FeCatalog catalog, Set<TableName> tbls) {
     Set<TableName> missingTbls = Sets.newHashSet();
     Set<TableName> viewTbls = Sets.newHashSet();
     for (TableName tblName: tbls) {
       if (loadedTbls_.containsKey(tblName)) continue;
-      Db db = catalog.getDb(tblName.getDb());
+      FeDb db = catalog.getDb(tblName.getDb());
       if (db == null) continue;
       dbs_.add(tblName.getDb());
-      Table tbl = db.getTable(tblName.getTbl());
+      FeTable tbl = db.getTable(tblName.getTbl());
       if (tbl == null) continue;
       if (!tbl.isLoaded()) {
         missingTbls.add(tblName);
         continue;
       }
       loadedTbls_.put(tblName, tbl);
-      if (tbl instanceof View) {
-        viewTbls.addAll(collectTableCandidates(((View) tbl).getQueryStmt()));
+      if (tbl instanceof FeView) {
+        viewTbls.addAll(collectTableCandidates(((FeView) tbl).getQueryStmt()));
       }
     }
     // Recursively collect loaded/missing tables from loaded views.
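
StmtMetadataLoader now works entirely against FeCatalog, including the
restart check that compares catalog references across the load loop. The
pattern, reduced to its core (a sketch; the real loop also tracks request
and update counts):

    FeCatalog catalogAtStart = fe_.getCatalog();
    // ... prioritized-load RPCs are issued and catalog updates awaited ...
    FeCatalog catalogNow = fe_.getCatalog();
    // Reference inequality means catalogd was restarted, so tables loaded
    // against the old reference may be stale:
    boolean hasCatalogRestarted = catalogNow != catalogAtStart;
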
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableDef.java b/fe/src/main/java/org/apache/impala/analysis/TableDef.java
index 529594b..d11193e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableDef.java
@@ -25,10 +25,10 @@ import java.util.Set;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsStorageDescriptor;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.RowFormat;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.thrift.TAccessEvent;
@@ -307,9 +307,9 @@ class TableDef {
    * must be an HDFS table. If there are errors during the analysis, this will throw an
    * AnalysisException.
    */
-  public static void analyzeSortColumns(List<String> sortCols, Table table)
+  public static void analyzeSortColumns(List<String> sortCols, FeTable table)
       throws AnalysisException {
-    Preconditions.checkState(table instanceof HdfsTable);
+    Preconditions.checkState(table instanceof FeFsTable);
     analyzeSortColumns(sortCols, Column.toColumnNames(table.getNonClusteringColumns()),
         Column.toColumnNames(table.getClusteringColumns()));
   }
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableRef.java b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
index 80cdea8..86cc25b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
@@ -22,8 +22,8 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.planner.JoinNode.DistributionMode;
 import org.apache.impala.rewrite.ExprRewriter;
@@ -268,7 +268,7 @@ public class TableRef implements ParseNode {
     return null;
   }
 
-  public Table getTable() {
+  public FeTable getTable() {
     Preconditions.checkNotNull(resolvedPath_);
     return resolvedPath_.getRootTable();
   }
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index a4129c4..444e211 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.ql.parse.HiveLexer;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsCompression;
@@ -50,7 +52,6 @@ import org.apache.impala.catalog.KuduColumn;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.RowFormat;
 import org.apache.impala.catalog.Table;
-import org.apache.impala.catalog.View;
 import org.apache.impala.util.KuduUtil;
 
 /**
@@ -218,9 +219,9 @@ public class ToSqlUtils {
    * Returns a "CREATE TABLE" or "CREATE VIEW" statement that creates the specified
    * table.
    */
-  public static String getCreateTableSql(Table table) throws CatalogException {
+  public static String getCreateTableSql(FeTable table) throws CatalogException {
     Preconditions.checkNotNull(table);
-    if (table instanceof View) return getCreateViewSql((View)table);
+    if (table instanceof FeView) return getCreateViewSql((FeView)table);
     org.apache.hadoop.hive.metastore.api.Table msTable = table.getMetaStoreTable();
     // Use a LinkedHashMap to preserve the ordering of the table properties.
     LinkedHashMap<String, String> properties = Maps.newLinkedHashMap(msTable.getParameters());
@@ -405,7 +406,7 @@ public class ToSqlUtils {
     return sb.toString();
   }
 
-  public static String getCreateViewSql(View view) {
+  public static String getCreateViewSql(FeView view) {
     StringBuffer sb = new StringBuffer();
     sb.append("CREATE VIEW ");
     // Use toSql() to ensure that the table name and query statement are normalized
diff --git a/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java b/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
index 4f5dbfb..81f8048 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
@@ -20,8 +20,8 @@ package org.apache.impala.analysis;
 import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TTruncateParams;
 
@@ -39,7 +39,7 @@ public class TruncateStmt extends StatementBase {
   private final boolean ifExists_;
 
   // Set in analyze().
-  private Table table_;
+  private FeTable table_;
 
   public TruncateStmt(TableName tableName, boolean ifExists) {
     Preconditions.checkNotNull(tableName);
@@ -63,7 +63,7 @@ public class TruncateStmt extends StatementBase {
       throw e;
     }
     // We only support truncating hdfs tables now.
-    if (!(table_ instanceof HdfsTable)) {
+    if (!(table_ instanceof FeFsTable)) {
       throw new AnalysisException(String.format(
           "TRUNCATE TABLE not supported on non-HDFS table: %s", table_.getFullName()));
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
index 6c33861..f103d8f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
@@ -26,10 +26,10 @@ import java.util.Map;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.thrift.TTupleDescriptor;
 
 import com.google.common.base.Joiner;
@@ -139,13 +139,13 @@ public class TupleDescriptor {
     return result;
   }
 
-  public Table getTable() {
+  public FeTable getTable() {
     if (path_ == null) return null;
     return path_.getRootTable();
   }
 
   public TableName getTableName() {
-    Table t = getTable();
+    FeTable t = getTable();
     return (t == null) ? null : t.getTableName();
   }
 
@@ -334,7 +334,7 @@ public class TupleDescriptor {
    * Return true if the slots being materialized are all partition columns.
    */
   public boolean hasClusteringColsOnly() {
-    Table table = getTable();
+    FeTable table = getTable();
     if (!(table instanceof HdfsTable) || table.getNumClusteringCols() == 0) return false;
 
     HdfsTable hdfsTable = (HdfsTable)table;
diff --git a/fe/src/main/java/org/apache/impala/analysis/WithClause.java b/fe/src/main/java/org/apache/impala/analysis/WithClause.java
index 1a771cd..4f93f36 100644
--- a/fe/src/main/java/org/apache/impala/analysis/WithClause.java
+++ b/fe/src/main/java/org/apache/impala/analysis/WithClause.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.View;
 import org.apache.impala.common.AnalysisException;
 
@@ -82,7 +83,7 @@ public class WithClause implements ParseNode {
       withClauseAnalyzer.registerLocalView(view);
     }
     // Register all local views with the analyzer.
-    for (View localView: withClauseAnalyzer.getLocalViews().values()) {
+    for (FeView localView: withClauseAnalyzer.getLocalViews().values()) {
       analyzer.registerLocalView(localView);
     }
     // Record audit events because the resolved table references won't generate any
@@ -121,7 +122,7 @@ public class WithClause implements ParseNode {
       // Enclose the view alias and explicit labels in quotes if Hive cannot parse it
       // without quotes. This is needed for view compatibility between Impala and Hive.
       String aliasSql = ToSqlUtils.getIdentSql(view.getName());
-      if (view.hasColLabels()) {
+      if (view.getColLabels() != null) {
         aliasSql += "(" + Joiner.on(", ").join(
             ToSqlUtils.getIdentSqlList(view.getOriginalColLabels())) + ")";
       }
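
The last change above is a small behavioral translation: FeView evidently
does not carry the hasColLabels() convenience, so the check becomes a null
test on getColLabels(). A sketch of the equivalence being assumed:

    // Before: if (view.hasColLabels()) { ... }
    // After, against the FeView interface -- assumes getColLabels() returns
    // null exactly when no explicit labels were given:
    if (view.getColLabels() != null) {
      // emit "(label1, label2, ...)" after the view alias
    }
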
diff --git a/fe/src/main/java/org/apache/impala/catalog/Catalog.java b/fe/src/main/java/org/apache/impala/catalog/Catalog.java
index aba88df..82b411a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Catalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Catalog.java
@@ -35,7 +35,6 @@ import org.apache.impala.util.PatternMatcher;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import org.apache.impala.util.TUniqueIdUtil;
 
 /**
  * Thread safe interface for reading and updating metadata stored in the Hive MetaStore.
diff --git a/fe/src/main/java/org/apache/impala/catalog/Db.java b/fe/src/main/java/org/apache/impala/catalog/Db.java
index d59a28f..5955e0a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Db.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Db.java
@@ -57,7 +57,7 @@ import com.google.common.collect.Maps;
  * value is the base64 representation of the thrift serialized function object.
  *
  */
-public class Db extends CatalogObjectImpl {
+public class Db extends CatalogObjectImpl implements FeDb {
   private static final Logger LOG = LoggerFactory.getLogger(Db.class);
   private final TDatabase thriftDb_;
 
@@ -123,9 +123,11 @@ public class Db extends CatalogObjectImpl {
     return msDb.getParameters().remove(k) != null;
   }
 
+  @Override // FeDb
   public boolean isSystemDb() { return isSystemDb_; }
+  @Override // FeDb
   public TDatabase toThrift() { return thriftDb_; }
-  @Override
+  @Override // FeDb
   public String getName() { return thriftDb_.getDb_name(); }
   @Override
   public TCatalogObjectType getCatalogObjectType() { return TCatalogObjectType.DATABASE; }
@@ -149,6 +151,7 @@ public class Db extends CatalogObjectImpl {
    */
   public List<Table> getTables() { return tableCache_.getValues(); }
 
+  @Override
   public boolean containsTable(String tableName) {
     return tableCache_.contains(tableName.toLowerCase());
   }
@@ -157,6 +160,7 @@ public class Db extends CatalogObjectImpl {
    * Returns the Table with the given name if present in the table cache or null if the
    * table does not exist in the cache.
    */
+  @Override // FeDb
   public Table getTable(String tblName) { return tableCache_.get(tblName); }
 
   /**
@@ -205,26 +209,19 @@ public class Db extends CatalogObjectImpl {
   private static final FunctionResolutionOrder FUNCTION_RESOLUTION_ORDER =
       new FunctionResolutionOrder();
 
-  /**
-   * Returns the metastore.api.Database object this Database was created from.
-   * Returns null if it is not related to a hive database such as builtins_db.
-   */
+  @Override // FeDb
   public org.apache.hadoop.hive.metastore.api.Database getMetaStoreDb() {
     return thriftDb_.getMetastore_db();
   }
 
-  /**
-   * Returns the number of functions in this database.
-   */
+  @Override // FeDb
   public int numFunctions() {
     synchronized (functions_) {
       return functions_.size();
     }
   }
 
-  /**
-   * See comment in Catalog.
-   */
+  @Override // FeDb
   public boolean containsFunction(String name) {
     synchronized (functions_) {
       return functions_.get(name) != null;
@@ -234,6 +231,7 @@ public class Db extends CatalogObjectImpl {
   /*
    * See comment in Catalog.
    */
+  @Override // FeDb
   public Function getFunction(Function desc, Function.CompareMode mode) {
     synchronized (functions_) {
       List<Function> fns = functions_.get(desc.functionName());
@@ -455,6 +453,7 @@ public class Db extends CatalogObjectImpl {
   /**
    * Returns all functions with the given name
    */
+  @Override // FeDb
   public List<Function> getFunctions(String name) {
     List<Function> result = Lists.newArrayList();
     Preconditions.checkNotNull(name);
@@ -467,9 +466,7 @@ public class Db extends CatalogObjectImpl {
     return result;
   }
 
-  /**
-   * Returns all functions with the given name and category.
-   */
+  @Override
   public List<Function> getFunctions(TFunctionCategory category, String name) {
     List<Function> result = Lists.newArrayList();
     Preconditions.checkNotNull(category);
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeCatalog.java b/fe/src/main/java/org/apache/impala/catalog/FeCatalog.java
new file mode 100644
index 0000000..57aa6f2
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FeCatalog.java
@@ -0,0 +1,119 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.catalog;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.thrift.TUniqueId;
+import org.apache.impala.util.PatternMatcher;
+import org.apache.thrift.TException;
+
+/**
+ * Interface between the front-end (analysis and planning) classes and the Catalog.
+ */
+public interface FeCatalog {
+  /** @see Catalog#getDbs(PatternMatcher) */
+  List<? extends FeDb> getDbs(PatternMatcher matcher);
+
+  /** @see Catalog#getTableNames(String, PatternMatcher) */
+  List<String> getTableNames(String dbName, PatternMatcher matcher)
+      throws DatabaseNotFoundException;
+
+  /** @see Catalog#getTable(String, String) */
+  FeTable getTable(String dbName, String tableName)
+      throws DatabaseNotFoundException;
+
+  /** @see Catalog#getTCatalogObject(TCatalogObject) */
+  TCatalogObject getTCatalogObject(TCatalogObject objectDesc)
+      throws CatalogException;
+
+  /** @see Catalog#getDb(String) */
+  FeDb getDb(String db);
+
+  /** @see Catalog#getHdfsPartition(String, String, List) */
+  FeFsPartition getHdfsPartition(String db, String tbl,
+      List<TPartitionKeyValue> partitionSpec) throws CatalogException;
+
+  /** @see Catalog#getDataSources(PatternMatcher) */
+  List<DataSource> getDataSources(PatternMatcher matcher);
+
+  /** @see Catalog#getDataSource(String) */
+  // TODO(todd): introduce FeDataSource
+  public DataSource getDataSource(String dataSourceName);
+
+  /** @see Catalog#getFunction(Function, Function.CompareMode) */
+  // TODO(todd): introduce FeFunction
+  public Function getFunction(Function desc, Function.CompareMode mode);
+
+  // TODO(todd): introduce FeFsCachePool
+  /** @see Catalog#getHdfsCachePool(String) */
+  public HdfsCachePool getHdfsCachePool(String poolName);
+
+  /**
+   * Issues a load request to the catalogd for the given tables.
+   */
+  void prioritizeLoad(Set<TableName> tableNames) throws InternalException;
+
+  /**
+   * Causes the calling thread to wait until a catalog update notification has been sent
+   * or the given timeout has been reached. A timeout value of 0 indicates an indefinite
+   * wait. Does not protect against spurious wakeups, so this should be called in a loop.
+   */
+  void waitForCatalogUpdate(long timeoutMs);
+
+  /**
+   * Returns the FS path where the metastore would create the given table. If the table
+   * has a "location" set, that will be returned. Otherwise the path will be resolved
+   * based on the location of the parent database. The metastore folder hierarchy is:
+   * <warehouse directory>/<db name>.db/<table name>
+   * Except for items in the default database which will be:
+   * <warehouse directory>/<table name>
+   * This method handles both of these cases.
+   */
+  public Path getTablePath(Table msTbl) throws TException;
+
+  /**
+   * @return the ID of the catalog service from which this catalog most recently
+   * loaded its contents.
+   */
+  TUniqueId getCatalogServiceId();
+
+  AuthorizationPolicy getAuthPolicy();
+  String getDefaultKuduMasterHosts();
+
+
+  /**
+   * Returns true if the catalog is ready to accept requests (has
+   * received and processed a valid catalog topic update from the StateStore),
+   * false otherwise.
+   */
+  boolean isReady();
+
+  /**
+   * Force the catalog into a particular readiness state.
+   * Used only by tests.
+   */
+  void setIsReady(boolean isReady);
+
+}
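
As an illustration of the intended call pattern, the following sketch resolves a
table purely through the new interfaces; the TableResolver helper and its method
names are hypothetical and not part of this patch:

    import org.apache.impala.catalog.DatabaseNotFoundException;
    import org.apache.impala.catalog.FeCatalog;
    import org.apache.impala.catalog.FeDb;
    import org.apache.impala.catalog.FeTable;

    // Hypothetical helper: resolves a table against any FeCatalog
    // implementation without downcasting to ImpaladCatalog.
    public class TableResolver {
      private final FeCatalog catalog_;

      public TableResolver(FeCatalog catalog) { catalog_ = catalog; }

      public FeTable resolve(String dbName, String tblName)
          throws DatabaseNotFoundException {
        FeDb db = catalog_.getDb(dbName);
        if (db == null) {
          throw new DatabaseNotFoundException("Database not found: " + dbName);
        }
        return db.getTable(tblName);
      }
    }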
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeDb.java b/fe/src/main/java/org/apache/impala/catalog/FeDb.java
new file mode 100644
index 0000000..8de877f
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FeDb.java
@@ -0,0 +1,100 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.catalog;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.impala.thrift.TDatabase;
+import org.apache.impala.thrift.TFunctionCategory;
+import org.apache.impala.util.PatternMatcher;
+
+/**
+ * Frontend interface for interacting with a database.
+ */
+public interface FeDb {
+  /**
+   * @return the name of the database
+   */
+  String getName();
+
+  /**
+   * @return the metastore.api.Database object this Database was created from,
+   * or null if it is not related to a Hive database, such as builtins_db.
+   */
+  Database getMetaStoreDb();
+
+  /**
+   * @return true if the database contains a table with the given name
+   */
+  boolean containsTable(String tableName);
+
+  /**
+   * @return the table with the given name
+   */
+  FeTable getTable(String tbl);
+
+  /**
+   * @return the names of the tables within this database
+   */
+  List<String> getAllTableNames();
+
+  /**
+   * @return true if this is a system database (i.e. cannot be dropped,
+   * modified, etc)
+   */
+  boolean isSystemDb();
+
+  // TODO(todd): can we simplify the many related 'getFunctions' calls
+  // in this interface?
+
+  /**
+   * @see Catalog#getFunction(Function, Function.CompareMode)
+   */
+  public Function getFunction(Function desc, Function.CompareMode mode);
+
+  /**
+   * @return all functions with the given name
+   */
+  List<Function> getFunctions(String functionName);
+
+  /**
+   * @return all functions with the given category and name
+   */
+  List<Function> getFunctions(TFunctionCategory category, String function);
+
+  /**
+   * @return all functions with the given category that match the given pattern
+   */
+  List<Function> getFunctions(TFunctionCategory category,
+      PatternMatcher patternMatcher);
+
+  /**
+   * @return the number of functions in this database.
+   */
+  int numFunctions();
+
+  /**
+   * @see Catalog#containsFunction(org.apache.impala.analysis.FunctionName)
+   */
+  boolean containsFunction(String function);
+
+  /**
+   * @return the Thrift-serialized structure for this database
+   */
+  TDatabase toThrift();
+}
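
For example, a minimal (hypothetical) helper can summarize a database using only
FeDb methods, with no reference to the Db implementation class:

    import org.apache.impala.catalog.FeDb;

    // Hypothetical sketch: summarizes a database through the FeDb interface.
    public class DbSummary {
      public static String describe(FeDb db) {
        StringBuilder sb = new StringBuilder(db.getName());
        if (db.isSystemDb()) sb.append(" (system)");
        sb.append(": ").append(db.getAllTableNames().size()).append(" tables, ");
        sb.append(db.numFunctions()).append(" functions");
        return sb.toString();
      }
    }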
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java b/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
new file mode 100644
index 0000000..9d0c40d
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
@@ -0,0 +1,155 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.catalog;
+
+import java.util.List;
+import java.util.Map;
+
+import javax.annotation.Nullable;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
+import org.apache.impala.thrift.TAccessLevel;
+import org.apache.impala.thrift.TPartitionStats;
+
+/**
+ * Frontend interface for interacting with a single filesystem-based partition.
+ */
+public interface FeFsPartition {
+  /**
+   * @return a partition name formed by concatenating partition keys and their values,
+   * compatible with the way Hive names partitions
+   */
+  String getPartitionName();
+
+  /**
+   * @return the ID for this partition which identifies it within its parent table.
+   */
+  long getId();
+
+  /**
+   * @return true if this partition represents the "default partition" of an
+   * unpartitioned table
+   */
+  boolean isDefaultPartition();
+
+  /**
+   * @return the table that contains this partition
+   */
+  FeFsTable getTable();
+
+  /**
+   * @return the files that this partition contains
+   */
+  List<FileDescriptor> getFileDescriptors();
+
+  /**
+   * @return true if this partition contains any files
+   */
+  boolean hasFileDescriptors();
+
+  /**
+   * @return the number of files in this partition
+   */
+  int getNumFileDescriptors();
+
+  /**
+   * @return the location of this partition
+   */
+  String getLocation();
+
+  /**
+   * @return the location of this partition as a Path
+   */
+  Path getLocationPath();
+
+  /**
+   * @return the HDFS permissions Impala has to this partition's directory - READ_ONLY,
+   * READ_WRITE, etc.
+   */
+  TAccessLevel getAccessLevel();
+
+  /**
+   * @return true if the partition resides at a location which can be cached (e.g. HDFS).
+   */
+  boolean isCacheable();
+
+  /**
+   * @return true if this partition is marked cached
+   */
+  boolean isMarkedCached();
+
+  /**
+   * @return the file format information for this partition
+   */
+  HdfsStorageDescriptor getInputFormatDescriptor();
+
+  /**
+   * @return the file format within this partition as an HdfsFileFormat enum
+   */
+  HdfsFileFormat getFileFormat();
+
+  /**
+   * @return the stats for this partition, or null if no stats are available
+   */
+  @Nullable
+  TPartitionStats getPartitionStats();
+
+  /**
+   * @return true if this partition has incremental stats available
+   */
+  boolean hasIncrementalStats();
+
+  /**
+   * @return the size (in bytes) of all the files inside this partition
+   */
+  long getSize();
+
+  /**
+   * @return the estimated number of rows in this partition (-1 if unknown)
+   */
+  long getNumRows();
+
+  /**
+   * Utility method which returns a string of conjuncts of equality exprs to exactly
+   * select this partition (e.g. ((month=2009) AND (year=2012))).
+   */
+  String getConjunctSql();
+
+  /**
+   * @return a list of partition values as strings. If mapNullsToHiveKey is true, any
+   * NULL value is returned as the table's default null partition key string value;
+   * otherwise it is returned as the string 'NULL'.
+   */
+  List<String> getPartitionValuesAsStrings(boolean mapNullsToHiveKey);
+
+  /**
+   * @return an immutable list of partition key expressions
+   */
+  List<LiteralExpr> getPartitionValues();
+
+  /**
+   * @return the value of the partition column at index 'pos' for this partition
+   */
+  LiteralExpr getPartitionValue(int pos);
+
+  /**
+   * @return the HMS parameters stored for this partition
+   */
+  Map<String, String> getParameters();
+}
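
A short hypothetical sketch of how planner-side code can aggregate over
partitions through this interface alone:

    import java.util.Collection;

    import org.apache.impala.catalog.FeFsPartition;

    // Hypothetical sketch: totals file count and bytes across a set of
    // partitions using only FeFsPartition methods.
    public class PartitionTotals {
      public static String summarize(Collection<? extends FeFsPartition> parts) {
        long files = 0;
        long bytes = 0;
        for (FeFsPartition p : parts) {
          if (!p.hasFileDescriptors()) continue;  // skip empty partitions
          files += p.getNumFileDescriptors();
          bytes += p.getSize();
        }
        return files + " files, " + bytes + " bytes";
      }
    }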
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
new file mode 100644
index 0000000..91059a4
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
@@ -0,0 +1,160 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.catalog;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.util.ListMap;
+
+/**
+ * Frontend interface for interacting with a filesystem-backed table.
+ */
+public interface FeFsTable extends FeTable {
+  /**
+   * @return true if the table and all its partitions reside at locations which
+   * support caching (e.g. HDFS).
+   */
+  public boolean isCacheable();
+
+  /**
+   * @return true if the table resides at a location which supports caching
+   * (e.g. HDFS).
+   */
+  public boolean isLocationCacheable();
+
+  /**
+   * @return true if this table is marked as cached
+   */
+  boolean isMarkedCached();
+
+  /**
+   * @return the storage location (HDFS path) of this table.
+   */
+  public String getLocation();
+
+  /**
+   * @return the value Hive is configured to use for NULL partition key values.
+   */
+  public String getNullPartitionKeyValue();
+
+  /**
+   * Get file info for the given set of partitions, or all partitions if
+   * partitionSet is null.
+   *
+   * @return partition file info, ordered by partition
+   */
+  TResultSet getFiles(List<List<TPartitionKeyValue>> partitionSet)
+      throws CatalogException;
+
+  /**
+   * @return the base HDFS directory where files of this table are stored.
+   */
+  public String getHdfsBaseDir();
+
+  /**
+   * @return the total number of bytes stored for this table.
+   */
+  long getTotalHdfsBytes();
+
+  /**
+   * @return true if this table is backed by the Avro file format
+   */
+  boolean isAvroTable();
+
+  /**
+   * @param totalBytes the known number of bytes in the table
+   * @return an estimated row count for the given number of file bytes
+   */
+  public long getExtrapolatedNumRows(long totalBytes);
+
+  /**
+   * @return true if stats extrapolation is enabled for this table, false otherwise.
+   */
+  boolean isStatsExtrapolationEnabled();
+
+  /**
+   * @return statistics on this table as a tabular result set. Used for the
+   * SHOW TABLE STATS statement. The schema of the returned TResultSet is set
+   * inside this method.
+   */
+  public TResultSet getTableStats();
+
+  /**
+   * @return all partitions of this table
+   */
+  Collection<? extends FeFsPartition> getPartitions();
+
+  /**
+   * @return identifiers for all partitions in this table
+   */
+  public Set<Long> getPartitionIds();
+
+  /**
+   * @return the map from partition identifier to partition object
+   */
+  Map<Long, ? extends FeFsPartition> getPartitionMap();
+
+  /**
+   * @param col the index of the target partitioning column
+   * @return a map from value to a set of partitions for which column 'col'
+   * has that value.
+   */
+  TreeMap<LiteralExpr, HashSet<Long>> getPartitionValueMap(int col);
+
+  /**
+   * @return the set of partitions which have a null value for column
+   * index 'colIdx'.
+   */
+  Set<Long> getNullPartitionIds(int colIdx);
+
+  /**
+   * Parses and returns the value of the 'skip.header.line.count' table property. If the
+   * value is not set for the table, returns 0. If parsing fails or a value < 0 is found,
+   * the error parameter is updated to contain an error message.
+   */
+  int parseSkipHeaderLineCount(StringBuilder error);
+
+  /**
+   * Selects a random sample of files from the given list of partitions such that the sum
+   * of file sizes is at least 'percentBytes' percent of the total number of bytes in
+   * those partitions and at least 'minSampleBytes'. The sample is returned as a map from
+   * partition id to a list of file descriptors selected from that partition.
+   * This function allocates memory proportional to the number of files in 'inputParts'.
+   * Its implementation tries to minimize the constant factor and object generation.
+   * The given 'randomSeed' is used for random number generation.
+   * The 'percentBytes' parameter must be between 0 and 100.
+   */
+  Map<Long, List<FileDescriptor>> getFilesSample(
+      Collection<? extends FeFsPartition> inputParts,
+      long percentBytes, long minSampleBytes,
+      long randomSeed);
+
+  /**
+   * @return the index of hosts that store replicas of blocks of this table.
+   */
+  ListMap<TNetworkAddress> getHostIndex();
+}
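
To show how getFilesSample() composes with getPartitions(), here is a
hypothetical sketch; the fixed seed makes the sample repeatable across runs:

    import java.util.List;
    import java.util.Map;

    import org.apache.impala.catalog.FeFsTable;
    import org.apache.impala.catalog.HdfsPartition.FileDescriptor;

    // Hypothetical sketch: draws a sample covering at least 10 percent of the
    // table's bytes, roughly as a planner might for TABLESAMPLE.
    public class SamplingExample {
      public static int countSampledFiles(FeFsTable table) {
        Map<Long, List<FileDescriptor>> sample = table.getFilesSample(
            table.getPartitions(), /*percentBytes=*/10,
            /*minSampleBytes=*/0, /*randomSeed=*/42);
        int numFiles = 0;
        for (List<FileDescriptor> fds : sample.values()) {
          numFiles += fds.size();
        }
        return numFiles;
      }
    }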
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeTable.java b/fe/src/main/java/org/apache/impala/catalog/FeTable.java
new file mode 100644
index 0000000..a60b827
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FeTable.java
@@ -0,0 +1,131 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.catalog;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTableStats;
+
+/**
+ * Frontend interface for interacting with a table.
+ */
+public interface FeTable {
+  /** @see CatalogObject#isLoaded() */
+  boolean isLoaded();
+
+  /**
+   * @return the metastore.api.Table object this Table was created from. Returns null
+   * if the derived Table object was not created from a metastore Table (e.g. InlineViews).
+   */
+  Table getMetaStoreTable();
+
+  /**
+   * @return the Hive StorageHandler class name that should be used for this table,
+   * or null if no storage handler is needed.
+   */
+  String getStorageHandlerClassName();
+
+  /**
+   * @return the type of catalog object -- either TABLE or VIEW.
+   */
+  TCatalogObjectType getCatalogObjectType();
+
+  /**
+   * @return the short name of this table (e.g. "my_table")
+   */
+  String getName();
+
+  /**
+   * @return the full name of this table (e.g. "my_db.my_table")
+   */
+  String getFullName();
+
+  /**
+   * @return the table name in structured form
+   */
+  TableName getTableName();
+
+  /**
+   * @return the columns in this table
+   */
+  ArrayList<Column> getColumns();
+
+  /**
+   * @return an unmodifiable list of all columns, but with partition columns at the end of
+   * the list rather than the beginning. This is equivalent to the order in
+   * which Hive enumerates columns.
+   */
+  List<Column> getColumnsInHiveOrder();
+
+  /**
+   * @return a list of the column names ordered by position.
+   */
+  List<String> getColumnNames();
+
+  /**
+   * @return an unmodifiable list of all partition columns.
+   */
+  List<Column> getClusteringColumns();
+
+  /**
+   * @return an unmodifiable list of all columns excluding any partition columns.
+   */
+  List<Column> getNonClusteringColumns();
+
+  int getNumClusteringCols();
+
+  boolean isClusteringColumn(Column c);
+
+  /**
+   * Case-insensitive lookup.
+   *
+   * @return null if the column with 'name' is not found.
+   */
+  Column getColumn(String name);
+
+  /**
+   * @return the type of this table: an array of structs that mirrors its columns.
+   */
+  ArrayType getType();
+
+  /**
+   * @return the database that contains this table
+   */
+  FeDb getDb();
+
+  /**
+   * @return the estimated number of rows in this table (or -1 if unknown)
+   */
+  long getNumRows();
+
+  /**
+   * @return the stats for this table
+   */
+  TTableStats getTTableStats();
+
+  /**
+   * @return the Thrift table descriptor for this table
+   */
+  TTableDescriptor toThriftDescriptor(int tableId, Set<Long> referencedPartitions);
+
+}
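
A hypothetical sketch of consuming FeTable from frontend code, here rendering a
schema in Hive column order:

    import org.apache.impala.catalog.Column;
    import org.apache.impala.catalog.FeTable;

    // Hypothetical sketch: prints a schema in Hive column order, marking the
    // partition (clustering) columns.
    public class SchemaPrinter {
      public static void print(FeTable table) {
        System.out.println(table.getFullName());
        for (Column c : table.getColumnsInHiveOrder()) {
          String marker = table.isClusteringColumn(c) ? " (partition key)" : "";
          System.out.println("  " + c.getName() + marker);
        }
      }
    }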
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeView.java b/fe/src/main/java/org/apache/impala/catalog/FeView.java
new file mode 100644
index 0000000..abceb97
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FeView.java
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.catalog;
+
+import java.util.List;
+
+import org.apache.impala.analysis.QueryStmt;
+
+/**
+ * Frontend interface for interacting with a view.
+ */
+public interface FeView extends FeTable {
+
+  /**
+   * @return true if this is a local view (i.e. one defined in a WITH clause)
+   */
+  boolean isLocalView();
+
+  /**
+   * @return the query statement that was parsed to create this view
+   */
+  QueryStmt getQueryStmt();
+
+  /**
+   * @return the explicit column labels for this view, or null if they need to be derived
+   * entirely from the underlying query statement. The returned list has at least as many
+   * elements as the number of column labels in the query statement.
+   */
+  List<String> getColLabels();
+
+}
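
A hypothetical sketch of interface-based view handling; the second method
mirrors the updated null check in WithClause.toSql() above:

    import org.apache.impala.catalog.FeTable;
    import org.apache.impala.catalog.FeView;

    // Hypothetical sketch: frontend code can branch on "view-ness" via the
    // interface instead of downcasting to catalog.View.
    public class ViewChecks {
      // True for catalog views, false for tables and WITH-clause (local) views.
      public static boolean isCatalogView(FeTable t) {
        return t instanceof FeView && !((FeView) t).isLocalView();
      }

      public static boolean hasExplicitColLabels(FeView v) {
        return v.getColLabels() != null;
      }
    }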
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index 5765902..3be49e8 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -20,6 +20,7 @@ package org.apache.impala.catalog;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Collection;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -75,7 +76,7 @@ import com.google.flatbuffers.FlatBufferBuilder;
  * order with NULLs sorting last. The ordering is useful for displaying partitions
  * in SHOW statements.
  */
-public class HdfsPartition implements Comparable<HdfsPartition> {
+public class HdfsPartition implements FeFsPartition {
   /**
    * Metadata for a single file in this partition.
    */
@@ -437,17 +438,17 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
   // store intermediate state for statistics computations.
   private Map<String, String> hmsParameters_;
 
+  @Override // FeFsPartition
   public HdfsStorageDescriptor getInputFormatDescriptor() {
     return fileFormatDescriptor_;
   }
 
+  @Override // FeFsPartition
   public boolean isDefaultPartition() {
     return id_ == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID;
   }
 
-  /**
-   * Returns true if the partition resides at a location which can be cached (e.g. HDFS).
-   */
+  @Override // FeFsPartition
   public boolean isCacheable() {
     return FileSystemUtil.isPathCacheable(new Path(getLocation()));
   }
@@ -461,6 +462,7 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
    * TODO: Consider storing the PartitionKeyValue in HdfsPartition. It would simplify
    * this code would be useful in other places, such as fromThrift().
    */
+  @Override // FeFsPartition
   public String getPartitionName() {
     List<String> partitionCols = Lists.newArrayList();
     for (int i = 0; i < getTable().getNumClusteringCols(); ++i) {
@@ -471,11 +473,7 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
         partitionCols, getPartitionValuesAsStrings(true));
   }
 
-  /**
-   * Returns a list of partition values as strings. If mapNullsToHiveKey is true, any NULL
-   * value is returned as the table's default null partition key string value, otherwise
-   * they are returned as 'NULL'.
-   */
+  @Override // FeFsPartition
   public List<String> getPartitionValuesAsStrings(boolean mapNullsToHiveKey) {
     List<String> ret = Lists.newArrayList();
     for (LiteralExpr partValue: getPartitionValues()) {
@@ -489,13 +487,10 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
     return ret;
   }
 
-  /**
-   * Utility method which returns a string of conjuncts of equality exprs to exactly
-   * select this partition (e.g. ((month=2009) AND (year=2012)).
-   * TODO: Remove this when the TODO elsewhere in this file to save and expose the
-   * list of TPartitionKeyValues has been resolved.
-   */
+  @Override // FeFsPartition
   public String getConjunctSql() {
+    // TODO: Remove this when the TODO elsewhere in this file to save and expose the
+    // list of TPartitionKeyValues has been resolved.
     List<String> partColSql = Lists.newArrayList();
     for (Column partCol: getTable().getClusteringColumns()) {
       partColSql.add(ToSqlUtils.getIdentSql(partCol.getName()));
@@ -537,10 +532,14 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
   public String getLocation() {
     return (location_ != null) ? location_.toString() : null;
   }
+  @Override // FeFsPartition
   public Path getLocationPath() { return new Path(getLocation()); }
+  @Override // FeFsPartition
   public long getId() { return id_; }
+  @Override // FeFsPartition
   public HdfsTable getTable() { return table_; }
   public void setNumRows(long numRows) { numRows_ = numRows; }
+  @Override // FeFsPartition
   public long getNumRows() { return numRows_; }
   public boolean isMarkedCached() { return isMarkedCached_; }
   void markCached() { isMarkedCached_ = true; }
@@ -557,6 +556,7 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
         fileFormatDescriptor_.getFileFormat().serializationLib());
   }
 
+  @Override // FeFsPartition
   public HdfsFileFormat getFileFormat() {
     return fileFormatDescriptor_.getFileFormat();
   }
@@ -569,8 +569,7 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
     return cachedMsPartitionDescriptor_.sdSerdeInfo;
   }
 
-  // May return null if no per-partition stats were recorded, or if the per-partition
-  // stats could not be deserialised from the parameter map.
+  @Override // FeFsPartition
   public TPartitionStats getPartitionStats() {
     try {
       return PartitionStatsUtil.partStatsFromParameters(hmsParameters_);
@@ -582,17 +581,16 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
     }
   }
 
+  @Override // FeFsPartition
   public boolean hasIncrementalStats() {
     TPartitionStats partStats = getPartitionStats();
     return partStats != null && partStats.intermediate_col_stats != null;
   }
 
-  /**
-   * Returns the HDFS permissions Impala has to this partition's directory - READ_ONLY,
-   * READ_WRITE, etc.
-   */
+  @Override // FeFsPartition
   public TAccessLevel getAccessLevel() { return accessLevel_; }
 
+  @Override // FeFsPartition
   public Map<String, String> getParameters() { return hmsParameters_; }
 
   public void putToParameters(String k, String v) { hmsParameters_.put(k, v); }
@@ -608,17 +606,18 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
   public void markDirty() { isDirty_ = true; }
   public boolean isDirty() { return isDirty_; }
 
-  /**
-   * Returns an immutable list of partition key expressions
-   */
+  @Override // FeFsPartition
   public List<LiteralExpr> getPartitionValues() { return partitionKeyValues_; }
+  @Override // FeFsPartition
   public LiteralExpr getPartitionValue(int i) { return partitionKeyValues_.get(i); }
+  @Override // FeFsPartition
   public List<HdfsPartition.FileDescriptor> getFileDescriptors() {
     return fileDescriptors_;
   }
   public void setFileDescriptors(List<FileDescriptor> descriptors) {
     fileDescriptors_ = descriptors;
   }
+  @Override // FeFsPartition
   public int getNumFileDescriptors() {
     return fileDescriptors_ == null ? 0 : fileDescriptors_.size();
   }
@@ -766,9 +765,7 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
         TAccessLevel.READ_WRITE);
   }
 
-  /**
-   * Return the size (in bytes) of all the files inside this partition
-   */
+  @Override // FeFsPartition
   public long getSize() {
     long result = 0;
     for (HdfsPartition.FileDescriptor fileDescriptor: fileDescriptors_) {
@@ -921,11 +918,14 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
   }
 
   /**
-   * Comparison method to allow ordering of HdfsPartitions by their partition-key values.
+   * Comparator to allow ordering of partitions by their partition-key values.
    */
-  @Override
-  public int compareTo(HdfsPartition o) {
-    return comparePartitionKeyValues(partitionKeyValues_, o.getPartitionValues());
+  public static final KeyValueComparator KV_COMPARATOR = new KeyValueComparator();
+  public static class KeyValueComparator implements Comparator<FeFsPartition> {
+    @Override
+    public int compare(FeFsPartition o1, FeFsPartition o2) {
+      return comparePartitionKeyValues(o1.getPartitionValues(), o2.getPartitionValues());
+    }
   }
 
   @VisibleForTesting
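
With Comparable removed, call sites order partitions through the shared
comparator instead; a minimal sketch (the PartitionOrdering wrapper is
hypothetical):

    import java.util.Collections;
    import java.util.List;

    import org.apache.impala.catalog.FeFsPartition;
    import org.apache.impala.catalog.FeFsTable;
    import org.apache.impala.catalog.HdfsPartition;

    import com.google.common.collect.Lists;

    // Hypothetical sketch: sorts a table's partitions by partition-key values
    // using the comparator introduced above.
    public class PartitionOrdering {
      public static List<FeFsPartition> ordered(FeFsTable table) {
        List<FeFsPartition> parts = Lists.newArrayList(table.getPartitions());
        Collections.sort(parts, HdfsPartition.KV_COMPARATOR);
        return parts;
      }
    }

A static comparator keeps the ordering logic in one place while letting it apply
to any FeFsPartition implementation, not just HdfsPartition.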
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 0362625..7ad9fb8 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -113,7 +113,7 @@ import com.google.common.collect.Sets;
  * The partition keys constitute the clustering columns.
  *
  */
-public class HdfsTable extends Table {
+public class HdfsTable extends Table implements FeFsTable {
   // hive's default value for table property 'serialization.null.format'
   private static final String DEFAULT_NULL_COLUMN_VALUE = "\\N";
 
@@ -352,21 +352,16 @@ public class HdfsTable extends Table {
         new HdfsPartitionLocationCompressor(numClusteringCols_);
   }
 
-  /**
-   * Returns true if the table resides at a location which supports caching (e.g. HDFS).
-   */
+  @Override // FeFsTable
   public boolean isLocationCacheable() {
     return FileSystemUtil.isPathCacheable(new Path(getLocation()));
   }
 
-  /**
-   * Returns true if the table and all its partitions reside at locations which
-   * support caching (e.g. HDFS).
-   */
+  @Override // FeFsTable
   public boolean isCacheable() {
     if (!isLocationCacheable()) return false;
     if (!isMarkedCached() && numClusteringCols_ > 0) {
-      for (HdfsPartition partition: getPartitions()) {
+      for (FeFsPartition partition: getPartitions()) {
         if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
           continue;
         }
@@ -556,27 +551,41 @@ public class HdfsTable extends Table {
   public TCatalogObjectType getCatalogObjectType() {
     return TCatalogObjectType.TABLE;
   }
+
+  @Override // FeFsTable
   public boolean isMarkedCached() { return isMarkedCached_; }
-  public Collection<HdfsPartition> getPartitions() { return partitionMap_.values(); }
-  public Map<Long, HdfsPartition> getPartitionMap() { return partitionMap_; }
+
+  @Override // FeFsTable
+  public Collection<? extends FeFsPartition> getPartitions() {
+    return partitionMap_.values();
+  }
+
+  @Override // FeFsTable
+  public Map<Long, ? extends FeFsPartition> getPartitionMap() {
+    return partitionMap_;
+  }
+
+  @Override // FeFsTable
   public Set<Long> getNullPartitionIds(int i) { return nullPartitionIds_.get(i); }
+
   public HdfsPartitionLocationCompressor getPartitionLocationCompressor() {
     return partitionLocationCompressor_;
   }
+
+  @Override // FeFsTable
   public Set<Long> getPartitionIds() { return partitionIds_; }
+
+  @Override // FeFsTable
   public TreeMap<LiteralExpr, HashSet<Long>> getPartitionValueMap(int i) {
     return partitionValuesMap_.get(i);
   }
 
-  /**
-   * Returns the value Hive is configured to use for NULL partition key values.
-   * Set during load.
-   */
-  public String getNullPartitionKeyValue() { return nullPartitionKeyValue_; }
+  @Override // FeFsTable
+  public String getNullPartitionKeyValue() {
+    return nullPartitionKeyValue_; // Set during load.
+  }
 
-  /*
-   * Returns the storage location (HDFS path) of this table.
-   */
+  @Override // FeFsTable
   public String getLocation() {
     return super.getMetaStoreTable().getSd().getLocation();
   }
@@ -1454,11 +1463,7 @@ public class HdfsTable extends Table {
     return msTbl.getParameters().containsKey(TBL_PROP_SKIP_HEADER_LINE_COUNT);
   }
 
-  /**
-   * Parses and returns the value of the 'skip.header.line.count' table property. If the
-   * value is not set for the table, returns 0. If parsing fails or a value < 0 is found,
-   * the error parameter is updated to contain an error message.
-   */
+  @Override // FeFsTable
   public int parseSkipHeaderLineCount(StringBuilder error) {
     if (!hasSkipHeaderLineCount()) return 0;
     return parseSkipHeaderLineCount(getMetaStoreTable().getParameters(), error);
@@ -1753,14 +1758,16 @@ public class HdfsTable extends Table {
     return hdfsTable;
   }
 
+  @Override // FeFsTable
   public long getTotalHdfsBytes() { return fileMetadataStats_.totalFileBytes; }
+
+  @Override // FeFsTable
   public String getHdfsBaseDir() { return hdfsBaseDir_; }
   public Path getHdfsBaseDirPath() { return new Path(hdfsBaseDir_); }
+  @Override // FeFsTable
   public boolean isAvroTable() { return avroSchema_ != null; }
 
-  /**
-   * Get the index of hosts that store replicas of blocks of this table.
-   */
+  @Override // FeFsTable
   public ListMap<TNetworkAddress> getHostIndex() { return hostIndex_; }
 
   /**
@@ -1938,6 +1945,7 @@ public class HdfsTable extends Table {
    * - the row count statistic is zero and the file bytes is non-zero
    * Otherwise, returns a value >= 1.
    */
+  @Override // FeFsTable
   public long getExtrapolatedNumRows(long fileBytes) {
     if (!isStatsExtrapolationEnabled()) return -1;
     if (fileBytes == 0) return 0;
@@ -1954,6 +1962,7 @@ public class HdfsTable extends Table {
    * Reconciles the Impalad-wide --enable_stats_extrapolation flag and the
    * TBL_PROP_ENABLE_STATS_EXTRAPOLATION table property
    */
+  @Override // FeFsTable
   public boolean isStatsExtrapolationEnabled() {
     org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
     String propVal = msTbl.getParameters().get(TBL_PROP_ENABLE_STATS_EXTRAPOLATION);
@@ -1961,11 +1970,7 @@ public class HdfsTable extends Table {
     return Boolean.parseBoolean(propVal);
   }
 
-  /**
-   * Returns statistics on this table as a tabular result set. Used for the
-   * SHOW TABLE STATS statement. The schema of the returned TResultSet is set
-   * inside this method.
-   */
+  @Override // FeFsTable
   public TResultSet getTableStats() {
     TResultSet result = new TResultSet();
     TResultSetMetadata resultSchema = new TResultSetMetadata();
@@ -1995,7 +2000,7 @@ public class HdfsTable extends Table {
     // Pretty print partitions and their stats.
     ArrayList<HdfsPartition> orderedPartitions =
         Lists.newArrayList(partitionMap_.values());
-    Collections.sort(orderedPartitions);
+    Collections.sort(orderedPartitions, HdfsPartition.KV_COMPARATOR);
 
     long totalCachedBytes = 0L;
     for (HdfsPartition p: orderedPartitions) {
@@ -2076,11 +2081,7 @@ public class HdfsTable extends Table {
     return result;
   }
 
-  /**
-   * Returns files info for the given dbname/tableName and partition spec.
-   * Returns files info for all partitions, if partition spec is null, ordered
-   * by partition.
-   */
+  @Override // FeFsTable
   public TResultSet getFiles(List<List<TPartitionKeyValue>> partitionSet)
       throws CatalogException {
     TResultSet result = new TResultSet();
@@ -2098,7 +2099,7 @@ public class HdfsTable extends Table {
       // Get a list of HdfsPartition objects for the given partition set.
       orderedPartitions = getPartitionsFromPartitionSet(partitionSet);
     }
-    Collections.sort(orderedPartitions);
+    Collections.sort(orderedPartitions, HdfsPartition.KV_COMPARATOR);
 
     for (HdfsPartition p: orderedPartitions) {
       List<FileDescriptor> orderedFds = Lists.newArrayList(p.getFileDescriptors());
@@ -2140,23 +2141,15 @@ public class HdfsTable extends Table {
         hmsPartition.getSd(), hmsPartition);
     refreshPartitionFileMetadata(refreshedPartition);
     Preconditions.checkArgument(oldPartition == null
-        || oldPartition.compareTo(refreshedPartition) == 0);
+        || HdfsPartition.KV_COMPARATOR.compare(oldPartition, refreshedPartition) == 0);
     dropPartition(oldPartition);
     addPartition(refreshedPartition);
   }
 
-  /**
-   * Selects a random sample of files from the given list of partitions such that the sum
-   * of file sizes is at least 'percentBytes' percent of the total number of bytes in
-   * those partitions and at least 'minSampleBytes'. The sample is returned as a map from
-   * partition id to a list of file descriptors selected from that partition.
-   * This function allocates memory proportional to the number of files in 'inputParts'.
-   * Its implementation tries to minimize the constant factor and object generation.
-   * The given 'randomSeed' is used for random number generation.
-   * The 'percentBytes' parameter must be between 0 and 100.
-   */
+  @Override // FeFsTable
   public Map<Long, List<FileDescriptor>> getFilesSample(
-      Collection<HdfsPartition> inputParts, long percentBytes, long minSampleBytes,
+      Collection<? extends FeFsPartition> inputParts,
+      long percentBytes, long minSampleBytes,
       long randomSeed) {
     Preconditions.checkState(percentBytes >= 0 && percentBytes <= 100);
     Preconditions.checkState(minSampleBytes >= 0);
@@ -2174,8 +2167,8 @@ public class HdfsTable extends Table {
 
     // Ensure a consistent ordering of files for repeatable runs. The files within a
     // partition are already ordered based on how they are loaded in the catalog.
-    List<HdfsPartition> orderedParts = Lists.newArrayList(inputParts);
-    Collections.sort(orderedParts);
+    List<FeFsPartition> orderedParts = Lists.newArrayList(inputParts);
+    Collections.sort(orderedParts, HdfsPartition.KV_COMPARATOR);
 
     // fileIdxs contains indexes into the file descriptor lists of all inputParts
     // parts[i] contains the partition corresponding to fileIdxs[i]
@@ -2185,10 +2178,10 @@ public class HdfsTable extends Table {
     // multiple times during the sampling, regardless of the sample percent. We purposely
     // avoid generating objects proportional to the number of files.
     int[] fileIdxs = new int[totalNumFiles];
-    HdfsPartition[] parts = new HdfsPartition[totalNumFiles];
+    FeFsPartition[] parts = new FeFsPartition[totalNumFiles];
     int idx = 0;
     long totalBytes = 0;
-    for (HdfsPartition part: orderedParts) {
+    for (FeFsPartition part: orderedParts) {
       totalBytes += part.getSize();
       int numFds = part.getNumFileDescriptors();
       for (int fileIdx = 0; fileIdx < numFds; ++fileIdx) {
@@ -2210,7 +2203,7 @@ public class HdfsTable extends Table {
     Map<Long, List<FileDescriptor>> result = Maps.newHashMap();
     while (selectedBytes < targetBytes && numFilesRemaining > 0) {
       int selectedIdx = Math.abs(rnd.nextInt()) % numFilesRemaining;
-      HdfsPartition part = parts[selectedIdx];
+      FeFsPartition part = parts[selectedIdx];
       Long partId = Long.valueOf(part.getId());
       List<FileDescriptor> sampleFileIdxs = result.get(partId);
       if (sampleFileIdxs == null) {
diff --git a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
index 369cc9c..fe5244d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
@@ -20,7 +20,6 @@ package org.apache.impala.catalog;
 import java.nio.ByteBuffer;
 import java.util.ArrayDeque;
 import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.fs.Path;
@@ -73,7 +72,7 @@ import com.google.common.base.Preconditions;
  * The CatalogServiceId is also tracked to detect if a different instance of the catalog
  * service has been started, in which case a full topic update is required.
  */
-public class ImpaladCatalog extends Catalog {
+public class ImpaladCatalog extends Catalog implements FeCatalog {
   private static final Logger LOG = Logger.getLogger(ImpaladCatalog.class);
   private static final TUniqueId INITIAL_CATALOG_SERVICE_ID = new TUniqueId(0L, 0L);
   public static final String BUILTINS_DB = "_impala_builtins";
@@ -222,19 +221,12 @@ public class ImpaladCatalog extends Catalog {
   }
 
 
-  /**
-   * Issues a load request to the catalogd for the given tables.
-   */
+  @Override // FeCatalog
   public void prioritizeLoad(Set<TableName> tableNames) throws InternalException {
     FeSupport.PrioritizeLoad(tableNames);
   }
 
-  /**
-   * Causes the calling thread to wait until a catalog update notification has been sent
-   * or the given timeout has been reached. A timeout value of 0 indicates an indefinite
-   * wait. Does not protect against spurious wakeups, so this should be called in a loop.
-   *
-   */
+  @Override // FeCatalog
   public void waitForCatalogUpdate(long timeoutMs) {
     synchronized (catalogUpdateEventNotifier_) {
       try {
@@ -246,15 +238,7 @@ public class ImpaladCatalog extends Catalog {
   }
 
 
-  /**
-   * Returns the HDFS path where the metastore would create the given table. If the table
-   * has a "location" set, that will be returned. Otherwise the path will be resolved
-   * based on the location of the parent database. The metastore folder hierarchy is:
-   * <warehouse directory>/<db name>.db/<table name>
-   * Except for items in the default database which will be:
-   * <warehouse directory>/<table name>
-   * This method handles both of these cases.
-   */
+  @Override // FeCatalog
   public Path getTablePath(org.apache.hadoop.hive.metastore.api.Table msTbl)
       throws TException {
     try (MetaStoreClient msClient = getMetaStoreClient()) {
@@ -510,23 +494,22 @@ public class ImpaladCatalog extends Catalog {
     }
   }
 
-  /**
-   * Returns true if the ImpaladCatalog is ready to accept requests (has
-   * received and processed a valid catalog topic update from the StateStore),
-   * false otherwise.
-   */
+  @Override // FeCatalog
   public boolean isReady() {
     return lastSyncedCatalogVersion_.get() > INITIAL_CATALOG_VERSION;
   }
 
   // Only used for testing.
+  @Override // FeCatalog
   public void setIsReady(boolean isReady) {
     lastSyncedCatalogVersion_.incrementAndGet();
     synchronized (catalogUpdateEventNotifier_) {
       catalogUpdateEventNotifier_.notifyAll();
     }
   }
+  @Override // FeCatalog
   public AuthorizationPolicy getAuthPolicy() { return authPolicy_; }
+  @Override // FeCatalog
   public String getDefaultKuduMasterHosts() { return defaultKuduMasterHosts_; }
 
   private void LibCacheSetNeedsRefresh(String hdfsLocation) {
@@ -539,6 +522,8 @@ public class ImpaladCatalog extends Catalog {
       LOG.error("LibCacheRemoveEntry(" + hdfsLibFile + ") failed.");
     }
   }
+
+  @Override // FeCatalog
   public TUniqueId getCatalogServiceId() { return catalogServiceId_; }
 
   public static Db getBuiltinsDb() { return builtinsDb_; }
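
Per the waitForCatalogUpdate() contract, callers must loop because the method
does not protect against spurious wakeups; a hypothetical sketch:

    import org.apache.impala.catalog.FeCatalog;

    // Hypothetical sketch: blocks until the catalog reports readiness,
    // re-checking after every update notification (or timeout).
    public class CatalogReadiness {
      public static void awaitReady(FeCatalog catalog, long pollMs) {
        while (!catalog.isReady()) {
          catalog.waitForCatalogUpdate(pollMs);
        }
      }
    }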
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java
index 57dab1a..6e7d784 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -59,7 +59,7 @@ import com.google.common.collect.Maps;
  * is more general than Hive's CLUSTER BY ... INTO BUCKETS clause (which partitions
  * a key range into a fixed number of buckets).
  */
-public abstract class Table extends CatalogObjectImpl {
+public abstract class Table extends CatalogObjectImpl implements FeTable {
   private static final Logger LOG = Logger.getLogger(Table.class);
   protected org.apache.hadoop.hive.metastore.api.Table msTable_;
   protected final Db db_;
@@ -128,7 +128,10 @@ public abstract class Table extends CatalogObjectImpl {
   public ReentrantLock getLock() { return tableLock_; }
   public abstract TTableDescriptor toThriftDescriptor(
       int tableId, Set<Long> referencedPartitions);
+
+  @Override // FeTable
   public abstract TCatalogObjectType getCatalogObjectType();
+
   public long getMetadataOpsCount() { return metadataOpsCount_.get(); }
   public long getEstimatedMetadataSize() { return estimatedMetadataSize_.get(); }
   public void setEstimatedMetadataSize(long estimatedMetadataSize) {
@@ -306,6 +309,7 @@ public abstract class Table extends CatalogObjectImpl {
     return newTable;
   }
 
+  @Override // FeTable
   public boolean isClusteringColumn(Column c) {
     return c.getPosition() < numClusteringCols_;
   }
@@ -431,20 +435,27 @@ public abstract class Table extends CatalogObjectImpl {
      return type;
    }
 
+  @Override // FeTable
   public Db getDb() { return db_; }
+
+  @Override // FeTable
   public String getName() { return name_; }
+
+  @Override // FeTable
   public String getFullName() { return (db_ != null ? db_.getName() + "." : "") + name_; }
+
+  @Override // FeTable
   public TableName getTableName() {
     return new TableName(db_ != null ? db_.getName() : null, name_);
   }
-  @Override
+
+  @Override // CatalogObject
   public String getUniqueName() { return "TABLE:" + getFullName(); }
 
+  @Override // FeTable
   public ArrayList<Column> getColumns() { return colsByPos_; }
 
-  /**
-   * Returns a list of the column names ordered by position.
-   */
+  @Override // FeTable
   public List<String> getColumnNames() { return Column.toColumnNames(colsByPos_); }
 
   /**
@@ -462,44 +473,31 @@ public abstract class Table extends CatalogObjectImpl {
    * Subclasses should override this if they provide a storage handler class. Currently
    * only HBase tables need to provide a storage handler.
    */
+  @Override // FeTable
   public String getStorageHandlerClassName() { return null; }
 
-  /**
-   * Returns an unmodifiable list of all columns, but with partition columns at the end of
-   * the list rather than the beginning. This is equivalent to the order in
-   * which Hive enumerates columns.
-   */
+  @Override // FeTable
   public List<Column> getColumnsInHiveOrder() {
     ArrayList<Column> columns = Lists.newArrayList(getNonClusteringColumns());
     columns.addAll(getClusteringColumns());
     return Collections.unmodifiableList(columns);
   }
 
-
-  /**
-   * Returns an unmodifiable list of all partition columns.
-   */
+  @Override // FeTable
   public List<Column> getClusteringColumns() {
     return Collections.unmodifiableList(colsByPos_.subList(0, numClusteringCols_));
   }
 
-  /**
-   * Returns an unmodifiable list of all columns excluding any partition columns.
-   */
+  @Override // FeTable
   public List<Column> getNonClusteringColumns() {
     return Collections.unmodifiableList(colsByPos_.subList(numClusteringCols_,
         colsByPos_.size()));
   }
 
-  /**
-   * Case-insensitive lookup. Returns null if the column with 'name' is not found.
-   */
+  @Override // FeTable
   public Column getColumn(String name) { return colsByName_.get(name.toLowerCase()); }
 
-  /**
-   * Returns the metastore.api.Table object this Table was created from. Returns null
-   * if the derived Table object was not created from a metastore Table (ex. InlineViews).
-   */
+  @Override // FeTable
   public org.apache.hadoop.hive.metastore.api.Table getMetaStoreTable() {
     return msTable_;
   }
@@ -508,6 +506,7 @@ public abstract class Table extends CatalogObjectImpl {
     msTable_ = msTbl;
   }
 
+  @Override // FeTable
   public int getNumClusteringCols() { return numClusteringCols_; }
 
   /**
@@ -520,8 +519,13 @@ public abstract class Table extends CatalogObjectImpl {
     numClusteringCols_ = n;
   }
 
+  @Override // FeTable
   public long getNumRows() { return tableStats_.num_rows; }
+
+  @Override // FeTable
   public TTableStats getTTableStats() { return tableStats_; }
+
+  @Override // FeTable
   public ArrayType getType() { return type_; }
 
   public static boolean isExternalTable(
diff --git a/fe/src/main/java/org/apache/impala/catalog/View.java b/fe/src/main/java/org/apache/impala/catalog/View.java
index c1f1a9b..66d942a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/View.java
+++ b/fe/src/main/java/org/apache/impala/catalog/View.java
@@ -43,7 +43,7 @@ import com.google.common.collect.Lists;
  * Refreshing or invalidating a view will reload the view's definition but will not
  * affect the metadata of the underlying tables (if any).
  */
-public class View extends Table {
+public class View extends Table implements FeView {
 
   // The original SQL-string given as view definition. Set during analysis.
   // Corresponds to Hive's viewOriginalText.
@@ -165,7 +165,11 @@ public class View extends Table {
 
   @Override
   public TCatalogObjectType getCatalogObjectType() { return TCatalogObjectType.VIEW; }
+
+  @Override // FeView
   public QueryStmt getQueryStmt() { return queryStmt_; }
+
+  @Override // FeView
   public boolean isLocalView() { return isLocalView_; }
 
   /**
@@ -173,11 +177,7 @@ public class View extends Table {
    */
   public List<String> getOriginalColLabels() { return colLabels_; }
 
-  /**
-   * Returns the explicit column labels for this view, or null if they need to be derived
-   * entirely from the underlying query statement. The returned list has at least as many
-   * elements as the number of column labels in the query stmt.
-   */
+  @Override // FeView
   public List<String> getColLabels() {
     if (colLabels_ == null) return null;
     if (colLabels_.size() >= queryStmt_.getColLabels().size()) return colLabels_;
@@ -187,8 +187,6 @@ public class View extends Table {
     return explicitColLabels;
   }
 
-  public boolean hasColLabels() { return colLabels_ != null; }
-
   @Override
   public TTableDescriptor toThriftDescriptor(int tableId, Set<Long> referencedPartitions) {
     throw new IllegalStateException("Cannot call toThriftDescriptor() on a view.");
diff --git a/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java b/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java
index 28939ed..75042e6 100644
--- a/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java
@@ -19,7 +19,7 @@
 package org.apache.impala.planner;
 
 import org.apache.impala.analysis.DescriptorTable;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.thrift.TDataSink;
 import org.apache.impala.thrift.TDataSinkType;
 import org.apache.impala.thrift.TExplainLevel;
@@ -32,7 +32,7 @@ import org.apache.impala.thrift.TTableSinkType;
  * data from a plan fragment into an HBase table using HTable.
  */
 public class HBaseTableSink extends TableSink {
-  public HBaseTableSink(Table targetTable) {
+  public HBaseTableSink(FeTable targetTable) {
     super(targetTable, Op.INSERT);
   }
 
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
index 3c0fb15..c8f1cc6 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
@@ -28,7 +28,8 @@ import org.apache.impala.analysis.SlotDescriptor;
 import org.apache.impala.analysis.SlotId;
 import org.apache.impala.analysis.SlotRef;
 import org.apache.impala.catalog.Column;
-import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.NotImplementedException;
@@ -56,7 +57,7 @@ public class HdfsPartitionFilter {
   // indices into Table.getColumnNames()
   private final ArrayList<Integer> refdKeys_ = Lists.newArrayList();
 
-  public HdfsPartitionFilter(Expr predicate, HdfsTable tbl, Analyzer analyzer) {
+  public HdfsPartitionFilter(Expr predicate, FeFsTable tbl, Analyzer analyzer) {
     predicate_ = predicate;
 
     // populate lhsSlotRefs_ and refdKeys_
@@ -83,14 +84,14 @@ public class HdfsPartitionFilter {
    * Evaluate a filter against a batch of partitions and return the partition ids
    * that pass the filter.
    */
-  public HashSet<Long> getMatchingPartitionIds(ArrayList<HdfsPartition> partitions,
+  public HashSet<Long> getMatchingPartitionIds(ArrayList<FeFsPartition> partitions,
       Analyzer analyzer) throws ImpalaException {
     HashSet<Long> result = new HashSet<Long>();
     // List of predicates to evaluate
     ArrayList<Expr> predicates = new ArrayList<Expr>(partitions.size());
     long[] partitionIds = new long[partitions.size()];
     int indx = 0;
-    for (HdfsPartition p: partitions) {
+    for (FeFsPartition p: partitions) {
       predicates.add(buildPartitionPredicate(p, analyzer));
       partitionIds[indx++] = p.getId();
     }
@@ -110,13 +111,13 @@ public class HdfsPartitionFilter {
    * Construct a predicate for a given partition by substituting the SlotRefs
    * for the partition cols with the respective partition-key values.
    */
-  private Expr buildPartitionPredicate(HdfsPartition partition, Analyzer analyzer)
+  private Expr buildPartitionPredicate(FeFsPartition p, Analyzer analyzer)
       throws ImpalaException {
     // construct smap
     ExprSubstitutionMap sMap = new ExprSubstitutionMap();
     for (int i = 0; i < refdKeys_.size(); ++i) {
       sMap.put(
-          lhsSlotRefs_.get(i), partition.getPartitionValues().get(refdKeys_.get(i)));
+          lhsSlotRefs_.get(i), p.getPartitionValues().get(refdKeys_.get(i)));
     }
 
     Expr literalPredicate = predicate_.substitute(sMap, analyzer, false);
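
HdfsPartitionFilter only reads partition metadata (getPartitionValues(),
getId()), so widening its parameters from HdfsPartition to FeFsPartition is
mechanical. Stripped of Impala's Expr and substitution-map machinery, the
binding loop in getMatchingPartitionIds() has this shape (toy types,
illustrative only; the real code batches the substituted predicates and
evaluates them in the backend):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    final class PartitionFilterSketch {
      // Toy stand-in for Expr plus ExprSubstitutionMap: a template that
      // is evaluated against one partition's key values.
      interface PartitionPredicate {
        boolean evaluate(Map<String, String> partitionKeyValues);
      }

      // Evaluate the template per partition and collect passing ids.
      static List<Long> matchingIds(PartitionPredicate template,
          Map<Long, Map<String, String>> keyValuesByPartitionId) {
        List<Long> result = new ArrayList<>();
        for (Map.Entry<Long, Map<String, String>> e
            : keyValuesByPartitionId.entrySet()) {
          if (template.evaluate(e.getValue())) result.add(e.getKey());
        }
        return result;
      }
    }
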
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
index 7e7d852..1240e7f 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
@@ -39,8 +39,8 @@ import org.apache.impala.analysis.NullLiteral;
 import org.apache.impala.analysis.SlotId;
 import org.apache.impala.analysis.SlotRef;
 import org.apache.impala.analysis.TupleDescriptor;
-import org.apache.impala.catalog.HdfsPartition;
-import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.rewrite.BetweenToCompoundRule;
@@ -77,7 +77,7 @@ public class HdfsPartitionPruner {
   // Partition batch size used during partition pruning.
   private final static int PARTITION_PRUNING_BATCH_SIZE = 1024;
 
-  private final HdfsTable tbl_;
+  private final FeFsTable tbl_;
   private final List<SlotId> partitionSlots_;
 
   // For converting BetweenPredicates to CompoundPredicates so they can be
@@ -86,8 +86,8 @@ public class HdfsPartitionPruner {
       new ExprRewriter(BetweenToCompoundRule.INSTANCE);
 
   public HdfsPartitionPruner(TupleDescriptor tupleDesc) {
-    Preconditions.checkState(tupleDesc.getTable() instanceof HdfsTable);
-    tbl_ = (HdfsTable)tupleDesc.getTable();
+    Preconditions.checkState(tupleDesc.getTable() instanceof FeFsTable);
+    tbl_ = (FeFsTable)tupleDesc.getTable();
     partitionSlots_ = tupleDesc.getPartitionSlots();
 
   }
@@ -97,7 +97,7 @@ public class HdfsPartitionPruner {
    * that conjuncts used for filtering will be removed from the list 'conjuncts'.
   * If 'allowEmpty' is false, empty partitions are not returned.
    */
-  public List<HdfsPartition> prunePartitions(
+  public List<FeFsPartition> prunePartitions(
       Analyzer analyzer, List<Expr> conjuncts, boolean allowEmpty)
       throws ImpalaException {
     // Start with creating a collection of partition filters for the applicable conjuncts.
@@ -154,10 +154,10 @@ public class HdfsPartitionPruner {
     evalPartitionFiltersInBe(partitionFilters, matchingPartitionIds, analyzer);
 
     // Populate the list of valid, non-empty partitions to process
-    List<HdfsPartition> results = Lists.newArrayList();
-    Map<Long, HdfsPartition> partitionMap = tbl_.getPartitionMap();
+    List<FeFsPartition> results = Lists.newArrayList();
+    Map<Long, ? extends FeFsPartition> partitionMap = tbl_.getPartitionMap();
     for (Long id: matchingPartitionIds) {
-      HdfsPartition partition = partitionMap.get(id);
+      FeFsPartition partition = partitionMap.get(id);
       Preconditions.checkNotNull(partition);
       if (partition.hasFileDescriptors() || allowEmpty) results.add(partition);
     }
@@ -443,16 +443,16 @@ public class HdfsPartitionPruner {
    */
   private void evalPartitionFiltersInBe(List<HdfsPartitionFilter> filters,
       HashSet<Long> matchingPartitionIds, Analyzer analyzer) throws ImpalaException {
-    Map<Long, HdfsPartition> partitionMap = tbl_.getPartitionMap();
+    Map<Long, ? extends FeFsPartition> partitionMap = tbl_.getPartitionMap();
     // Set of partition ids that pass a filter
     HashSet<Long> matchingIds = Sets.newHashSet();
     // Batch of partitions
-    ArrayList<HdfsPartition> partitionBatch = Lists.newArrayList();
+    ArrayList<FeFsPartition> partitionBatch = Lists.newArrayList();
     // Identify the partitions that pass all filters.
     for (HdfsPartitionFilter filter: filters) {
       // Iterate through the currently valid partitions
       for (Long id: matchingPartitionIds) {
-        HdfsPartition p = partitionMap.get(id);
+        FeFsPartition p = partitionMap.get(id);
         Preconditions.checkState(
             p.getPartitionValues().size() == tbl_.getNumClusteringCols());
         // Add the partition to the current batch
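
The one non-mechanical change in the pruner is the bounded wildcard in
Map<Long, ? extends FeFsPartition>. Java generics are invariant, so a
Map<Long, HdfsPartition> is not a Map<Long, FeFsPartition> even though
HdfsPartition implements the interface; the wildcard is what lets
HdfsTable.getPartitionMap() keep returning its concrete value type. A
self-contained demonstration with toy types:

    import java.util.HashMap;
    import java.util.Map;

    class WildcardDemo {
      interface FeFsPartitionLike { long getId(); }
      static class HdfsPartitionLike implements FeFsPartitionLike {
        public long getId() { return 42L; }
      }

      // Accepts a map with any value type implementing the interface.
      static long firstId(Map<Long, ? extends FeFsPartitionLike> parts) {
        return parts.values().iterator().next().getId();
      }

      public static void main(String[] args) {
        Map<Long, HdfsPartitionLike> concrete = new HashMap<>();
        concrete.put(0L, new HdfsPartitionLike());
        // Compiles thanks to the wildcard; a plain
        // Map<Long, FeFsPartitionLike> parameter would reject this map.
        System.out.println(firstId(concrete));
      }
    }
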
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
index 2c127f0..16861af 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
@@ -50,8 +50,9 @@ import org.apache.impala.analysis.TupleId;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.ColumnStats;
 import org.apache.impala.catalog.HdfsCompression;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsPartition.FileBlock;
 import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
@@ -164,7 +165,7 @@ public class HdfsScanNode extends ScanNode {
   private final HdfsTable tbl_;
 
   // List of partitions to be scanned. Partitions have been pruned.
-  private final List<HdfsPartition> partitions_;
+  private final List<FeFsPartition> partitions_;
 
   // Parameters for table sampling. Null if not sampling.
   private final TableSampleClause sampleParams_;
@@ -277,9 +278,9 @@ public class HdfsScanNode extends ScanNode {
    * class comments above for details.
    */
   public HdfsScanNode(PlanNodeId id, TupleDescriptor desc, List<Expr> conjuncts,
-      List<HdfsPartition> partitions, TableRef hdfsTblRef, AggregateInfo aggInfo) {
+      List<FeFsPartition> partitions, TableRef hdfsTblRef, AggregateInfo aggInfo) {
     super(id, desc, "SCAN HDFS");
-    Preconditions.checkState(desc.getTable() instanceof HdfsTable);
+    Preconditions.checkState(desc.getTable() instanceof FeFsTable);
     tbl_ = (HdfsTable)desc.getTable();
     conjuncts_ = conjuncts;
     partitions_ = partitions;
@@ -300,7 +301,7 @@ public class HdfsScanNode extends ScanNode {
   @Override
   protected String debugString() {
     ToStringHelper helper = Objects.toStringHelper(this);
-    for (HdfsPartition partition: partitions_) {
+    for (FeFsPartition partition: partitions_) {
       helper.add("Partition " + partition.getId() + ":", partition.toString());
     }
     return helper.addValue(super.debugString()).toString();
@@ -422,7 +423,7 @@ public class HdfsScanNode extends ScanNode {
       }
     }
 
-    for (HdfsPartition part: partitions_) {
+    for (FeFsPartition part: partitions_) {
       HdfsFileFormat format = part.getInputFormatDescriptor().getFileFormat();
       if (format.isComplexTypesSupported()) continue;
       // If the file format allows querying just scalar typed columns and the query
@@ -760,7 +761,7 @@ public class HdfsScanNode extends ScanNode {
     largestScanRangeBytes_ = 0;
     maxScanRangeNumRows_ = -1;
     fileFormats_ = Sets.newHashSet();
-    for (HdfsPartition partition: partitions_) {
+    for (FeFsPartition partition: partitions_) {
       List<FileDescriptor> fileDescs = partition.getFileDescriptors();
       if (sampledFiles != null) {
         // If we are sampling, check whether this partition is included in the sample.
@@ -785,7 +786,7 @@ public class HdfsScanNode extends ScanNode {
         // Limit the scan range length if generating scan ranges.
         long maxBlockSize =
             Math.max(partitionFs.getDefaultBlockSize(partition.getLocationPath()),
-                HdfsPartition.FileDescriptor.MIN_SYNTHETIC_BLOCK_SIZE);
+                FileDescriptor.MIN_SYNTHETIC_BLOCK_SIZE);
         if (scanRangeBytesLimit > 0) {
           scanRangeBytesLimit = Math.min(scanRangeBytesLimit, maxBlockSize);
         } else {
@@ -857,7 +858,7 @@ public class HdfsScanNode extends ScanNode {
    * the scan ranges can be (may be ignored if the file is not splittable).
    */
   private void generateScanRangeSpecs(
-      HdfsPartition partition, HdfsPartition.FileDescriptor fileDesc, long maxBlockSize) {
+      FeFsPartition partition, FileDescriptor fileDesc, long maxBlockSize) {
     Preconditions.checkArgument(fileDesc.getNumFileBlocks() == 0);
     Preconditions.checkArgument(maxBlockSize > 0);
     if (fileDesc.getFileLength() <= 0) return;
@@ -884,8 +885,8 @@ public class HdfsScanNode extends ScanNode {
    * TScanRangeLocationLists are added to scanRanges_. A pair is returned that indicates
    * whether the file has a missing disk id and the maximum scan range (in bytes) found.
    */
-  private Pair<Boolean, Long> transformBlocksToScanRanges(HdfsPartition partition,
-      HdfsPartition.FileDescriptor fileDesc, boolean fsHasBlocks,
+  private Pair<Boolean, Long> transformBlocksToScanRanges(FeFsPartition partition,
+      FileDescriptor fileDesc, boolean fsHasBlocks,
       long scanRangeBytesLimit, Analyzer analyzer) {
     Preconditions.checkArgument(fileDesc.getNumFileBlocks() > 0);
     boolean fileDescMissingDiskIds = false;
@@ -1052,7 +1053,7 @@ public class HdfsScanNode extends ScanNode {
     partitionNumRows_ = -1;
     hasCorruptTableStats_ = false;
     if (tbl_.getNumClusteringCols() > 0) {
-      for (HdfsPartition p: partitions_) {
+      for (FeFsPartition p: partitions_) {
         // Check for corrupt partition stats
         long partNumRows = p.getNumRows();
         if (partNumRows < -1  || (partNumRows == 0 && p.getSize() > 0))  {
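
One of the remaining downcasts the commit message mentions is visible in
this file: the constructor's precondition was widened to FeFsTable, but
tbl_ is still declared as HdfsTable and the assignment still casts to the
implementation class. Until the field is widened in a follow-on patch, the
interface-typed check no longer guarantees the cast succeeds; an FeFsTable
that is not an HdfsTable would pass the checkState and then fail with a
ClassCastException on the next line:

    // From the hunk above: check and cast now disagree.
    Preconditions.checkState(desc.getTable() instanceof FeFsTable);
    tbl_ = (HdfsTable)desc.getTable();

This is harmless while HdfsTable is the only FeFsTable implementation, but
it is the kind of spot the TODOs in later files flag explicitly.
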
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java b/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
index 46709c0..0de2edb 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
@@ -21,6 +21,7 @@ import java.util.List;
 
 import org.apache.impala.analysis.DescriptorTable;
 import org.apache.impala.analysis.Expr;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.Table;
@@ -57,7 +58,7 @@ public class HdfsTableSink extends TableSink {
   // populate the RowGroup::sorting_columns list in parquet files.
   private List<Integer> sortColumns_ = Lists.newArrayList();
 
-  public HdfsTableSink(Table targetTable, List<Expr> partitionKeyExprs,
+  public HdfsTableSink(FeTable targetTable, List<Expr> partitionKeyExprs,
       boolean overwrite, boolean inputIsClustered, List<Integer> sortColumns) {
     super(targetTable, Op.INSERT);
     Preconditions.checkState(targetTable instanceof HdfsTable);
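
HdfsTableSink shows the complementary pattern to the scan node: accept the
frontend interface in the signature so callers stay implementation-agnostic,
but fail fast if handed a table this sink cannot actually write. A compact
toy version of the pattern (hypothetical names, not the real classes):

    final class FsSinkSketch {
      interface FeTableLike { String getFullName(); }
      static class HdfsTableLike implements FeTableLike {
        public String getFullName() { return "db.tbl"; }
        String getBaseDir() { return "/warehouse/db.tbl"; }
      }

      private final HdfsTableLike target_;

      // Interface-typed parameter, implementation-pinned field.
      FsSinkSketch(FeTableLike targetTable) {
        if (!(targetTable instanceof HdfsTableLike)) {
          throw new IllegalStateException("HDFS sink requires an "
              + "HDFS-backed table: " + targetTable.getFullName());
        }
        target_ = (HdfsTableLike) targetTable;
      }

      String writePath() { return target_.getBaseDir(); }
    }
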
diff --git a/fe/src/main/java/org/apache/impala/planner/JoinNode.java b/fe/src/main/java/org/apache/impala/planner/JoinNode.java
index 030d706..815b31f 100644
--- a/fe/src/main/java/org/apache/impala/planner/JoinNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/JoinNode.java
@@ -29,7 +29,7 @@ import org.apache.impala.analysis.SlotDescriptor;
 import org.apache.impala.analysis.SlotRef;
 import org.apache.impala.analysis.TupleId;
 import org.apache.impala.catalog.ColumnStats;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.Pair;
 import org.apache.impala.thrift.TJoinDistributionMode;
@@ -422,7 +422,7 @@ public abstract class JoinNode extends PlanNode {
     private static boolean hasNumRowsAndNdvStats(SlotDescriptor slotDesc) {
       if (slotDesc.getColumn() == null) return false;
       if (!slotDesc.getStats().hasNumDistinctValues()) return false;
-      Table tbl = slotDesc.getParent().getTable();
+      FeTable tbl = slotDesc.getParent().getTable();
       if (tbl == null || tbl.getNumRows() == -1) return false;
       return true;
     }
diff --git a/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java b/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java
index f75b170..ef7cbab 100644
--- a/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java
@@ -22,7 +22,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.impala.analysis.DescriptorTable;
-import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.thrift.TDataSink;
 import org.apache.impala.thrift.TDataSinkType;
 import org.apache.impala.thrift.TExplainLevel;
@@ -43,7 +43,7 @@ public class KuduTableSink extends TableSink {
  // expression i matches a column index into the Kudu schema at targetColIdxs_[i].
   private final ArrayList<Integer> targetColIdxs_;
 
-  public KuduTableSink(Table targetTable, Op sinkOp,
+  public KuduTableSink(FeTable targetTable, Op sinkOp,
       List<Integer> referencedColumns) {
     super(targetTable, sinkOp);
     targetColIdxs_ = referencedColumns != null
diff --git a/fe/src/main/java/org/apache/impala/planner/Planner.java b/fe/src/main/java/org/apache/impala/planner/Planner.java
index 7aeac60..0967319 100644
--- a/fe/src/main/java/org/apache/impala/planner/Planner.java
+++ b/fe/src/main/java/org/apache/impala/planner/Planner.java
@@ -32,9 +32,9 @@ import org.apache.impala.analysis.JoinOperator;
 import org.apache.impala.analysis.QueryStmt;
 import org.apache.impala.analysis.SortInfo;
 import org.apache.impala.analysis.TupleId;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.PrintUtils;
 import org.apache.impala.common.RuntimeEnv;
@@ -182,7 +182,7 @@ public class Planner {
       if (ctx_.isInsertOrCtas()) {
         InsertStmt insertStmt = ctx_.getAnalysisResult().getInsertStmt();
         List<Expr> exprs = Lists.newArrayList();
-        Table targetTable = insertStmt.getTargetTable();
+        FeTable targetTable = insertStmt.getTargetTable();
         Preconditions.checkNotNull(targetTable);
         if (targetTable instanceof KuduTable) {
           if (ctx_.isInsert()) {
diff --git a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
index 89f14d1..5e38f83 100644
--- a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
+++ b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
@@ -38,8 +38,8 @@ import org.apache.impala.analysis.SlotRef;
 import org.apache.impala.analysis.TupleDescriptor;
 import org.apache.impala.analysis.TupleId;
 import org.apache.impala.analysis.TupleIsNullPredicate;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduColumn;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.IdGenerator;
 import org.apache.impala.common.InternalException;
@@ -734,7 +734,7 @@ public final class RuntimeFilterGenerator {
       ScanNode targetNode) {
     Preconditions.checkState(targetExpr.isBoundByTupleIds(targetNode.getTupleIds()));
     TupleDescriptor baseTblDesc = targetNode.getTupleDesc();
-    Table tbl = baseTblDesc.getTable();
+    FeTable tbl = baseTblDesc.getTable();
     if (tbl.getNumClusteringCols() == 0) return false;
     List<SlotId> sids = Lists.newArrayList();
     targetExpr.getIds(null, sids);
diff --git a/fe/src/main/java/org/apache/impala/planner/ScanNode.java b/fe/src/main/java/org/apache/impala/planner/ScanNode.java
index e23ea93..f2daef5 100644
--- a/fe/src/main/java/org/apache/impala/planner/ScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/ScanNode.java
@@ -21,8 +21,8 @@ import java.util.List;
 
 import org.apache.impala.analysis.SlotDescriptor;
 import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.NotImplementedException;
 import org.apache.impala.thrift.TNetworkAddress;
@@ -204,7 +204,7 @@ abstract public class ScanNode extends PlanNode {
 
   @Override
   protected String getDisplayLabelDetail() {
-    Table table = desc_.getTable();
+    FeTable table = desc_.getTable();
     List<String> path = Lists.newArrayList();
     path.add(table.getDb().getName());
     path.add(table.getName());
diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
index fb669c5..3a1c956 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
@@ -54,11 +54,11 @@ import org.apache.impala.analysis.UnionStmt;
 import org.apache.impala.analysis.UnionStmt.UnionOperand;
 import org.apache.impala.catalog.ColumnStats;
 import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
-import org.apache.impala.catalog.HdfsPartition;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
@@ -1212,7 +1212,7 @@ public class SingleNodePlanner {
     // Do partition pruning before deciding which slots to materialize because we might
     // end up removing some predicates.
     HdfsPartitionPruner pruner = new HdfsPartitionPruner(tupleDesc);
-    List<HdfsPartition> partitions = pruner.prunePartitions(analyzer, conjuncts, false);
+    List<FeFsPartition> partitions = pruner.prunePartitions(analyzer, conjuncts, false);
 
     // Mark all slots referenced by the remaining conjuncts as materialized.
     analyzer.materializeSlots(conjuncts);
@@ -1229,7 +1229,7 @@ public class SingleNodePlanner {
     if (fastPartitionKeyScans && tupleDesc.hasClusteringColsOnly()) {
       HashSet<List<Expr>> uniqueExprs = new HashSet<List<Expr>>();
 
-      for (HdfsPartition partition: partitions) {
+      for (FeFsPartition partition: partitions) {
         // Ignore empty partitions to match the behavior of the scan based approach.
         if (partition.isDefaultPartition() || partition.getSize() == 0) {
           continue;
@@ -1301,8 +1301,9 @@ public class SingleNodePlanner {
       Expr.removeDuplicates(conjuncts);
     }
 
-    Table table = tblRef.getTable();
-    if (table instanceof HdfsTable) {
+    // TODO(todd) introduce FE interfaces for DataSourceTable, HBaseTable, KuduTable
+    FeTable table = tblRef.getTable();
+    if (table instanceof FeFsTable) {
       return createHdfsScanPlan(tblRef, aggInfo, conjuncts, analyzer);
     } else if (table instanceof DataSourceTable) {
       scanNode = new DataSourceScanNode(ctx_.getNextNodeId(), tblRef.getDesc(),
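
With only FeFsTable extracted so far, the table-kind dispatch in the
planner mixes the new interface with the remaining implementation classes,
exactly as the TODO in the hunk above says. The shape of that dispatch,
reduced to toy marker types:

    final class ScanDispatchSketch {
      interface FeFsTableLike {}
      interface DataSourceTableLike {}
      interface HBaseTableLike {}
      interface KuduTableLike {}

      // Mirrors createScanNode(): HDFS tables match through the new
      // interface, the other kinds still through concrete classes.
      static String scanKind(Object table) {
        if (table instanceof FeFsTableLike) return "SCAN HDFS";
        if (table instanceof DataSourceTableLike) return "SCAN DATA SOURCE";
        if (table instanceof HBaseTableLike) return "SCAN HBASE";
        if (table instanceof KuduTableLike) return "SCAN KUDU";
        throw new IllegalArgumentException("unsupported table type");
      }
    }
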
diff --git a/fe/src/main/java/org/apache/impala/planner/TableSink.java b/fe/src/main/java/org/apache/impala/planner/TableSink.java
index 4c4b35e..a702206 100644
--- a/fe/src/main/java/org/apache/impala/planner/TableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/TableSink.java
@@ -20,10 +20,10 @@ package org.apache.impala.planner;
 import java.util.List;
 
 import org.apache.impala.analysis.Expr;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.KuduTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.thrift.TSinkAction;
 
 import com.google.common.base.Preconditions;
@@ -73,11 +73,11 @@ public abstract class TableSink extends DataSink {
   }
 
   // Table which is to be populated by this sink.
-  protected final Table targetTable_;
+  protected final FeTable targetTable_;
   // The type of operation to be performed by this sink.
   protected final Op sinkOp_;
 
-  public TableSink(Table targetTable, Op sinkAction) {
+  public TableSink(FeTable targetTable, Op sinkAction) {
     targetTable_ = targetTable;
     sinkOp_ = sinkAction;
   }
@@ -90,7 +90,7 @@ public abstract class TableSink extends DataSink {
    * For HDFS tables 'sortColumns' specifies the indices into the list of non-clustering
    * columns of the target table that are stored in the 'sort.columns' table property.
    */
-  public static TableSink create(Table table, Op sinkAction,
+  public static TableSink create(FeTable table, Op sinkAction,
       List<Expr> partitionKeyExprs,  List<Integer> referencedColumns,
       boolean overwrite, boolean inputIsClustered, List<Integer> sortColumns) {
     Preconditions.checkNotNull(partitionKeyExprs);
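
Widening TableSink.create() matters because it is the single factory the
planner calls; once it takes FeTable, planner code can stay interface-typed
from InsertStmt.getTargetTable() all the way to sink construction. A
trimmed, self-contained sketch of the factory shape (toy types; the real
method also threads partition key exprs, referenced columns, and sort
columns):

    final class SinkFactorySketch {
      interface FeTableLike {}
      static class HdfsTableLike implements FeTableLike {}
      static class HBaseTableLike implements FeTableLike {}

      abstract static class Sink {}
      static class HdfsSink extends Sink {
        HdfsSink(HdfsTableLike t, boolean overwrite) {}
      }
      static class HBaseSink extends Sink {
        HBaseSink(HBaseTableLike t) {}
      }

      static Sink create(FeTableLike table, boolean overwrite) {
        if (table instanceof HdfsTableLike) {
          return new HdfsSink((HdfsTableLike) table, overwrite);
        }
        if (table instanceof HBaseTableLike) {
          // HBase sinks support plain INSERT only.
          if (overwrite) {
            throw new IllegalArgumentException("no INSERT OVERWRITE");
          }
          return new HBaseSink((HBaseTableLike) table);
        }
        throw new IllegalArgumentException("unsupported table type");
      }
    }
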
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 82422ab..e062eeb 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -61,6 +61,9 @@ import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.ColumnNotFoundException;
 import org.apache.impala.catalog.DataSource;
 import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsPartition;
@@ -811,7 +814,9 @@ public class CatalogOpExecutor {
       HdfsTable table) throws ImpalaException {
     Preconditions.checkState(params.isSetPartition_stats());
     List<HdfsPartition> modifiedParts = Lists.newArrayList();
-    for (HdfsPartition partition: table.getPartitions()) {
+    for (FeFsPartition fePartition: table.getPartitions()) {
+      // TODO(todd): avoid downcast to implementation class
+      HdfsPartition partition = (HdfsPartition)fePartition;
       if (partition.isDefaultPartition()) continue;
 
       // NULL keys are returned as 'NULL' in the partition_stats map, so don't substitute
@@ -1255,7 +1260,9 @@ public class CatalogOpExecutor {
 
     // List of partitions that were modified as part of this operation.
     List<HdfsPartition> modifiedParts = Lists.newArrayList();
-    for (HdfsPartition part: hdfsTable.getPartitions()) {
+    for (FeFsPartition fePart: hdfsTable.getPartitions()) {
+      // TODO(todd): avoid downcast
+      HdfsPartition part = (HdfsPartition) fePart;
       boolean isModified = false;
       // The default partition is an Impala-internal abstraction and is not
       // represented in the Hive Metastore.
@@ -1466,9 +1473,9 @@ public class CatalogOpExecutor {
    * uncaching all table data, if applicable. Throws no exceptions, only logs errors.
    * Does not update the HMS.
    */
-  private static void uncacheTable(Table table) {
-    if (!(table instanceof HdfsTable)) return;
-    HdfsTable hdfsTable = (HdfsTable) table;
+  private static void uncacheTable(FeTable table) {
+    if (!(table instanceof FeFsTable)) return;
+    FeFsTable hdfsTable = (FeFsTable) table;
     if (hdfsTable.isMarkedCached()) {
       try {
         HdfsCachingUtil.removeTblCacheDirective(table.getMetaStoreTable());
@@ -1477,7 +1484,7 @@ public class CatalogOpExecutor {
       }
     }
     if (table.getNumClusteringCols() > 0) {
-      for (HdfsPartition part: hdfsTable.getPartitions()) {
+      for (FeFsPartition part: hdfsTable.getPartitions()) {
         if (part.isMarkedCached()) {
           try {
             HdfsCachingUtil.removePartitionCacheDirective(part);
@@ -1523,7 +1530,7 @@ public class CatalogOpExecutor {
       catalog_.getLock().writeLock().unlock();
       try {
         HdfsTable hdfsTable = (HdfsTable)table;
-        for (HdfsPartition part: hdfsTable.getPartitions()) {
+        for (FeFsPartition part: hdfsTable.getPartitions()) {
           if (part.isDefaultPartition()) continue;
           FileSystemUtil.deleteAllVisibleFiles(new Path(part.getLocation()));
         }
@@ -2563,7 +2570,9 @@ public class CatalogOpExecutor {
       if (tbl.getNumClusteringCols() > 0) {
         // If this is a partitioned table, submit cache directives for all uncached
         // partitions.
-        for (HdfsPartition partition: hdfsTable.getPartitions()) {
+        for (FeFsPartition fePartition: hdfsTable.getPartitions()) {
+          // TODO(todd): avoid downcast
+          HdfsPartition partition = (HdfsPartition) fePartition;
           // No need to cache the default partition because it contains no files and is
           // not referred to by scan nodes.
           if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
@@ -2618,7 +2627,9 @@ public class CatalogOpExecutor {
       if (cacheDirId != null) HdfsCachingUtil.removeTblCacheDirective(msTbl);
       // Uncache all table partitions.
       if (tbl.getNumClusteringCols() > 0) {
-        for (HdfsPartition partition: hdfsTable.getPartitions()) {
+        for (FeFsPartition fePartition: hdfsTable.getPartitions()) {
+          // TODO(todd): avoid downcast
+          HdfsPartition partition = (HdfsPartition) fePartition;
           if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
             continue;
           }
@@ -3285,7 +3296,7 @@ public class CatalogOpExecutor {
         HashSet<String> partsToCreate =
             Sets.newHashSet(update.getCreated_partitions());
         partsToLoadMetadata = Sets.newHashSet(partsToCreate);
-        for (HdfsPartition partition: ((HdfsTable) table).getPartitions()) {
+        for (FeFsPartition partition: ((HdfsTable) table).getPartitions()) {
           // Skip dummy default partition.
           long partitionId = partition.getId();
           if (partitionId == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
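
CatalogOpExecutor is where the remaining downcasts from the commit message
cluster: getPartitions() now yields FeFsPartition, reads such as
uncacheTable() stay on the interface, and only the code paths that mutate
partitions cast back to HdfsPartition under a TODO. The recurring shape,
as a self-contained toy (the mutator is illustrative, not a specific
HdfsPartition API):

    import java.util.List;

    final class DowncastSketch {
      interface FeFsPartitionLike { long getId(); }
      static class HdfsPartitionLike implements FeFsPartitionLike {
        public long getId() { return 0L; }
        void putToParameters(String k, String v) {}  // illustrative mutator
      }

      // Iterate via the interface; downcast only where a write happens.
      static void updateAll(List<? extends FeFsPartitionLike> partitions) {
        for (FeFsPartitionLike fePart : partitions) {
          // TODO: avoid downcast to implementation class
          HdfsPartitionLike part = (HdfsPartitionLike) fePart;
          part.putToParameters("numRows", "0");
        }
      }
    }
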
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index c24ab12..beab12e 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -78,6 +78,8 @@ import org.apache.impala.catalog.DataSource;
 import org.apache.impala.catalog.DataSourceTable;
 import org.apache.impala.catalog.DatabaseNotFoundException;
 import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
+import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.HBaseTable;
 import org.apache.impala.catalog.HdfsTable;
@@ -645,15 +647,15 @@ public class Frontend {
    * Returns all databases in catalog cache that match the pattern of 'matcher' and are
    * accessible to 'user'.
    */
-  public List<Db> getDbs(PatternMatcher matcher, User user)
+  public List<? extends FeDb> getDbs(PatternMatcher matcher, User user)
       throws InternalException {
-    List<Db> dbs = impaladCatalog_.get().getDbs(matcher);
+    List<? extends FeDb> dbs = impaladCatalog_.get().getDbs(matcher);
     // If authorization is enabled, filter out the databases the user does not
     // have permissions on.
     if (authzConfig_.isEnabled()) {
-      Iterator<Db> iter = dbs.iterator();
+      Iterator<? extends FeDb> iter = dbs.iterator();
       while (iter.hasNext()) {
-        Db db = iter.next();
+        FeDb db = iter.next();
         if (!isAccessibleToUser(db, user)) iter.remove();
       }
     }
@@ -663,7 +665,7 @@ public class Frontend {
   /**
    * Check whether database is accessible to given user.
    */
-  private boolean isAccessibleToUser(Db db, User user)
+  private boolean isAccessibleToUser(FeDb db, User user)
       throws InternalException {
     if (db.getName().toLowerCase().equals(Catalog.DEFAULT_DB.toLowerCase())) {
       // Default DB should always be shown.
@@ -717,8 +719,8 @@ public class Frontend {
   public TResultSet getTableStats(String dbName, String tableName, TShowStatsOp op)
       throws ImpalaException {
     Table table = impaladCatalog_.get().getTable(dbName, tableName);
-    if (table instanceof HdfsTable) {
-      return ((HdfsTable) table).getTableStats();
+    if (table instanceof FeFsTable) {
+      return ((FeFsTable) table).getTableStats();
     } else if (table instanceof HBaseTable) {
       return ((HBaseTable) table).getTableStats();
     } else if (table instanceof DataSourceTable) {
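
Frontend.getDbs() keeps its filtering behavior under the new
List<? extends FeDb> return type because wildcard-typed lists still allow
reads and iterator removal; only insertion is ruled out. A minimal sketch,
assuming (as the real code does) that the catalog hands back a mutable
list, and with a toy accessibility test standing in for
isAccessibleToUser():

    import java.util.Iterator;
    import java.util.List;

    final class WildcardFilterSketch {
      interface FeDbLike { String getName(); }

      static void dropInaccessible(List<? extends FeDbLike> dbs) {
        Iterator<? extends FeDbLike> it = dbs.iterator();
        while (it.hasNext()) {
          if (it.next().getName().startsWith("secret_")) it.remove();
        }
        // dbs.add(...) would not compile: the element type is unknown
        // to the caller, which is exactly the safety the wildcard buys.
      }
    }
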
diff --git a/fe/src/main/java/org/apache/impala/service/JniCatalog.java b/fe/src/main/java/org/apache/impala/service/JniCatalog.java
index 1d822e4..daa57ab 100644
--- a/fe/src/main/java/org/apache/impala/service/JniCatalog.java
+++ b/fe/src/main/java/org/apache/impala/service/JniCatalog.java
@@ -30,6 +30,7 @@ import org.apache.impala.authorization.User;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.CatalogServiceCatalog;
 import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
@@ -56,7 +57,6 @@ import org.apache.impala.thrift.TUpdateCatalogRequest;
 import org.apache.impala.thrift.TBackendGflags;
 import org.apache.impala.util.GlogAppender;
 import org.apache.impala.util.PatternMatcher;
-import org.apache.sentry.hdfs.ThriftSerializer;
 import org.apache.thrift.TException;
 import org.apache.thrift.TSerializer;
 import org.apache.thrift.protocol.TBinaryProtocol;
@@ -172,7 +172,7 @@ public class JniCatalog {
     List<Db> dbs = catalog_.getDbs(PatternMatcher.MATCHER_MATCH_ALL);
     TGetDbsResult result = new TGetDbsResult();
     List<TDatabase> tDbs = Lists.newArrayListWithCapacity(dbs.size());
-    for (Db db: dbs) tDbs.add(db.toThrift());
+    for (FeDb db: dbs) tDbs.add(db.toThrift());
     result.setDbs(tDbs);
     TSerializer serializer = new TSerializer(protocolFactory_);
     return serializer.serialize(result);
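
The JniCatalog change is a good example of how call sites can migrate one
loop at a time: getDbs() still returns List<Db>, but the for-each variable
is already typed as the interface, since per-element widening is implicit.
A self-contained toy:

    import java.util.Arrays;
    import java.util.List;

    final class IncrementalMigrationSketch {
      interface FeDbLike { String name(); }
      static class DbImpl implements FeDbLike {
        public String name() { return "functional"; }
      }

      public static void main(String[] args) {
        List<DbImpl> dbs = Arrays.asList(new DbImpl());  // producer as-is
        // Consumer already iterates via the interface; no cast needed.
        for (FeDbLike db : dbs) System.out.println(db.name());
      }
    }
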
diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
index 20a028d..ea3b358 100644
--- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java
+++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
@@ -40,7 +40,7 @@ import org.apache.impala.authorization.AuthorizationConfig;
 import org.apache.impala.authorization.ImpalaInternalAdminUser;
 import org.apache.impala.authorization.User;
 import org.apache.impala.catalog.DataSource;
-import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.Role;
 import org.apache.impala.catalog.StructType;
@@ -84,7 +84,6 @@ import org.apache.impala.thrift.TShowRolesResult;
 import org.apache.impala.thrift.TShowStatsOp;
 import org.apache.impala.thrift.TShowStatsParams;
 import org.apache.impala.thrift.TTableName;
-import org.apache.impala.thrift.TUniqueId;
 import org.apache.impala.thrift.TUpdateCatalogCacheRequest;
 import org.apache.impala.thrift.TUpdateMembershipRequest;
 import org.apache.impala.util.GlogAppender;
@@ -287,11 +286,11 @@ public class JniFrontend {
     User user = params.isSetSession() ?
         new User(TSessionStateUtil.getEffectiveUser(params.getSession())) :
         ImpalaInternalAdminUser.getInstance();
-    List<Db> dbs = frontend_.getDbs(
+    List<? extends FeDb> dbs = frontend_.getDbs(
         PatternMatcher.createHivePatternMatcher(params.pattern), user);
     TGetDbsResult result = new TGetDbsResult();
     List<TDatabase> tDbs = Lists.newArrayListWithCapacity(dbs.size());
-    for (Db db: dbs) tDbs.add(db.toThrift());
+    for (FeDb db: dbs) tDbs.add(db.toThrift());
     result.setDbs(tDbs);
     TSerializer serializer = new TSerializer(protocolFactory_);
     try {
diff --git a/fe/src/main/java/org/apache/impala/service/MetadataOp.java b/fe/src/main/java/org/apache/impala/service/MetadataOp.java
index f1cb077..ea4864f 100644
--- a/fe/src/main/java/org/apache/impala/service/MetadataOp.java
+++ b/fe/src/main/java/org/apache/impala/service/MetadataOp.java
@@ -28,7 +28,7 @@ import org.apache.impala.analysis.TableName;
 import org.apache.impala.authorization.User;
 import org.apache.impala.catalog.Catalog;
 import org.apache.impala.catalog.Column;
-import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.ImpaladCatalog;
 import org.apache.impala.catalog.PrimitiveType;
@@ -267,7 +267,7 @@ public class MetadataOp {
     }
 
     ImpaladCatalog catalog = fe.getCatalog();
-    for (Db db: fe.getDbs(schemaPatternMatcher, user)) {
+    for (FeDb db: fe.getDbs(schemaPatternMatcher, user)) {
       if (fnPatternMatcher != PatternMatcher.MATCHER_MATCH_NONE) {
         // Get function metadata
         List<Function> fns = db.getFunctions(null, fnPatternMatcher);
diff --git a/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java b/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java
index 65902ac..99e5afa 100644
--- a/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java
+++ b/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java
@@ -32,7 +32,6 @@ import org.apache.impala.catalog.MapType;
 import org.apache.impala.catalog.ScalarType;
 import org.apache.impala.catalog.StructField;
 import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import com.google.common.collect.Lists;
 
diff --git a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
index 63e4924..81402b3 100644
--- a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.log4j.Logger;
 
 import org.apache.impala.analysis.TableName;
+import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.ImpalaException;
@@ -154,7 +155,7 @@ public class HdfsCachingUtil {
    * data. Also updates the partition's metadata to remove the cache directive ID.
    * No-op if the table is not cached.
    */
-  public static void removePartitionCacheDirective(HdfsPartition part)
+  public static void removePartitionCacheDirective(FeFsPartition part)
       throws ImpalaException {
     Preconditions.checkNotNull(part);
     Map<String, String> parameters = part.getParameters();
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
index f482140..58f73fa 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
@@ -39,6 +39,7 @@ import org.apache.impala.authorization.AuthorizeableTable;
 import org.apache.impala.authorization.User;
 import org.apache.impala.catalog.AuthorizationException;
 import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.ImpaladCatalog;
 import org.apache.impala.catalog.ScalarFunction;
 import org.apache.impala.catalog.Type;
@@ -2118,7 +2119,8 @@ public class AuthorizationTest extends FrontendTestBase {
         "functional_avro", "functional_parquet", "functional_seq_snap",
         "functional_text_lzo", "tpcds", "tpch");
 
-    List<Db> dbs = fe_.getDbs(PatternMatcher.createHivePatternMatcher("*"), USER);
+    List<? extends FeDb> dbs = fe_.getDbs(PatternMatcher.createHivePatternMatcher("*"),
+        USER);
     assertEquals(expectedDbs, extractDbNames(dbs));
 
     dbs = fe_.getDbs(PatternMatcher.MATCHER_MATCH_ALL, USER);
@@ -2141,9 +2143,9 @@ public class AuthorizationTest extends FrontendTestBase {
     assertEquals(expectedDbs, extractDbNames(dbs));
   }
 
-  private List<String> extractDbNames(List<Db> dbs) {
+  private List<String> extractDbNames(List<? extends FeDb> dbs) {
     List<String> names = Lists.newArrayListWithCapacity(dbs.size());
-    for (Db db: dbs) names.add(db.getName());
+    for (FeDb db: dbs) names.add(db.getName());
     return names;
   }
 
diff --git a/fe/src/test/java/org/apache/impala/analysis/StmtMetadataLoaderTest.java b/fe/src/test/java/org/apache/impala/analysis/StmtMetadataLoaderTest.java
index 39416f8..29eddfe 100644
--- a/fe/src/test/java/org/apache/impala/analysis/StmtMetadataLoaderTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/StmtMetadataLoaderTest.java
@@ -22,6 +22,7 @@ import java.util.Arrays;
 import org.apache.impala.analysis.StmtMetadataLoader.StmtTableCache;
 import org.apache.impala.authorization.AuthorizationConfig;
 import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Table;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
@@ -65,7 +66,7 @@ public class StmtMetadataLoaderTest {
   private void validateTables(StmtTableCache stmtTableCache, String[] expectedTables) {
     String[] actualTables = new String[stmtTableCache.tables.size()];
     int idx = 0;
-    for (Table t: stmtTableCache.tables.values()) {
+    for (FeTable t: stmtTableCache.tables.values()) {
       Assert.assertTrue(t.isLoaded());
       actualTables[idx++] = t.getFullName();
     }
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
index 6f63380..3750149 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
@@ -92,7 +92,7 @@ public class CatalogObjectToFromThriftTest {
       HdfsTable newHdfsTable = (HdfsTable) newTable;
       Assert.assertEquals(newHdfsTable.getPartitions().size(), 25);
       boolean foundDefaultPartition = false;
-      for (HdfsPartition hdfsPart: newHdfsTable.getPartitions()) {
+      for (FeFsPartition hdfsPart: newHdfsTable.getPartitions()) {
         if (hdfsPart.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
           Assert.assertEquals(foundDefaultPartition, false);
           foundDefaultPartition = true;
@@ -215,9 +215,9 @@ public class CatalogObjectToFromThriftTest {
     // Get any partition with valid HMS parameters to create a
     // dummy partition.
     HdfsPartition part = null;
-    for (HdfsPartition partition: hdfsTable.getPartitions()) {
+    for (FeFsPartition partition: hdfsTable.getPartitions()) {
       if (!partition.isDefaultPartition()) {
-        part = partition;
+        part = (HdfsPartition) partition;
         break;
       }
     }
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
index 897494e..04fe6cf 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
@@ -289,13 +289,13 @@ public class CatalogTest {
   public void TestPartitions() throws CatalogException {
     HdfsTable table =
         (HdfsTable) catalog_.getOrLoadTable("functional", "AllTypes");
-    Collection<HdfsPartition> partitions = table.getPartitions();
+    Collection<? extends FeFsPartition> partitions = table.getPartitions();
 
     // check that partition keys cover the date range 1/1/2009-12/31/2010
     // and that we have one file per partition, plus the default partition
     assertEquals(25, partitions.size());
     Set<Long> months = Sets.newHashSet();
-    for (HdfsPartition p: partitions) {
+    for (FeFsPartition p: partitions) {
       if (p.getId() == DEFAULT_PARTITION_ID) {
         continue;
       }
diff --git a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
index d5cce94..820b33a 100644
--- a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
+++ b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
@@ -28,11 +28,11 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 import org.apache.impala.catalog.Catalog;
-import org.apache.impala.catalog.Db;
-import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.FeDb;
+import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.thrift.ImpalaInternalServiceConstants;
 import org.apache.impala.util.PatternMatcher;
 
@@ -61,9 +61,9 @@ public class BlockIdGenerator {
 
       // Load all tables in the catalog
       Catalog catalog = CatalogServiceTestCatalog.create();
-      for (Db database: catalog.getDbs(PatternMatcher.MATCHER_MATCH_ALL)) {
+      for (FeDb database: catalog.getDbs(PatternMatcher.MATCHER_MATCH_ALL)) {
         for (String tableName: database.getAllTableNames()) {
-          Table table = database.getTable(tableName);
+          FeTable table = database.getTable(tableName);
           // Only do this for hdfs tables
           if (table == null || !(table instanceof HdfsTable)) {
             continue;
@@ -72,7 +72,7 @@ public class BlockIdGenerator {
 
           // Write the output as <tablename>: <blockid1> <blockid2> <etc>
           writer.write(tableName + ":");
-          for (HdfsPartition partition: hdfsTable.getPartitions()) {
+          for (FeFsPartition partition: hdfsTable.getPartitions()) {
             // Ignore the default partition.
             if (partition.getId() ==
                     ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
diff --git a/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java b/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java
index 32b5ee7..fefe34e 100644
--- a/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java
+++ b/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java
@@ -24,6 +24,7 @@ import org.apache.impala.authorization.AuthorizationConfig;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.CatalogServiceCatalog;
 import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.HdfsCachePool;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.ImpaladCatalog;
@@ -52,9 +53,9 @@ public class ImpaladTestCatalog extends ImpaladCatalog {
     CatalogServiceCatalog catalogServerCatalog =
         CatalogServiceTestCatalog.createWithAuth(authzConfig.getSentryConfig());
     // Bootstrap the catalog by adding all dbs, tables, and functions.
-    for (Db db: catalogServerCatalog.getDbs(PatternMatcher.MATCHER_MATCH_ALL)) {
+    for (FeDb db: catalogServerCatalog.getDbs(PatternMatcher.MATCHER_MATCH_ALL)) {
       // Adding DB should include all tables/fns in that database.
-      addDb(db);
+      addDb((Db)db);
     }
     authPolicy_ = catalogServerCatalog.getAuthPolicy();
     srcCatalog_ = catalogServerCatalog;