Posted to commits@impala.apache.org by ta...@apache.org on 2019/02/21 19:39:59 UTC

[impala] 11/13: IMPALA-7141 (part 1): clean up handling of default/dummy partition

This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a commit to branch 2.x
in repository https://gitbox.apache.org/repos/asf/impala.git

commit c50aa17da6854f52efee02df3f11a170170a938e
Author: Todd Lipcon <to...@cloudera.com>
AuthorDate: Wed Jun 13 13:55:22 2018 -0700

    IMPALA-7141 (part 1): clean up handling of default/dummy partition
    
    Currently, HdfsTable inconsistently uses the term "default partition"
    to refer to two different concepts:
    
    1) For unpartitioned tables, a single partition with ID 1 and no partition
       keys is created and added to the partition map.
    
    2) All tables have an additional partition added with partition ID -1
       which acts as a sort of prototype for partition creation: when new
       partitions are created during an INSERT operation, the file format
       and other related options are copied out of this special partition.
    
       This partition is inconsistently referred to as either the "default
       partition" or the "dummy partition".
    
    The handling of this second case (the partition with id -1) was somewhat
    messy:
    
    - the partition showed up in the partitionMap_ member, but not in the
      partitionIds_ member.
    
    - almost all of the call sites that iterate through the partitions of an
      HdfsTable instance ended up skipping over the dummy partition.
    
    - several call sites called getPartitions().size() but then had to
      adjust the result by subtracting one in order to actually count the
      number of partitions in a table.
    
    - similarly, test assertions for tables with 24 partitions had to
      expect a partition map size of 25.
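
    The before/after shape of those call sites, as a self-contained
    sketch (partition ids stand in for full partition objects; the
    constant name mirrors the old DEFAULT_PARTITION_ID):

        import java.util.ArrayList;
        import java.util.List;

        class CallSiteSketch {
          static final long DUMMY_ID = -1;

          public static void main(String[] args) {
            List<Long> ids = new ArrayList<>(List.of(1L, 2L, DUMMY_ID));

            // Before: every loop skipped the dummy entry, and counts
            // needed a "- 1" adjustment.
            int before = 0;
            for (long id : ids) {
              if (id == DUMMY_ID) continue;
              before++;
            }
            assert before == ids.size() - 1;

            // After: the collection holds only real partitions, so plain
            // iteration and size() are already correct.
            ids.removeIf(id -> id == DUMMY_ID);
            System.out.println(before + " == " + ids.size());
          }
        }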
    
    In order to address the above, this patch makes the following changes:
    
    - getPartitions() and getPartitionMap() no longer include the dummy
      partition. This removes a bunch of special case checks to skip over
      the dummy partition or to adjust partition counts based on it.
    
    - to clarify the purpose of this partition, references to it are renamed
      from "default partition" to "prototype partition".
    
    - when converting the HdfsTable to/from Thrift, the prototype partition
      is included in its own field in the struct, instead of being stuffed
      into the same map as the true partitions of the table. This reflects
      the fact that this partition is special (e.g. it is missing fields
      like 'location' which are otherwise required for real partitions).
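
    A hedged sketch of the resulting shape (the field layout follows the
    Thrift change in this patch; the holder class itself is hypothetical):

        import java.util.HashMap;
        import java.util.Map;

        class ThriftShapeSketch {
          static final long PROTOTYPE_PARTITION_ID = -1;

          // Real partitions only; no magic-id entry any more.
          Map<Long, String> partitions = new HashMap<>();
          // The prototype travels separately and may omit fields such as
          // 'location' that real partitions must carry.
          String prototypePartition;

          void check() {
            assert !partitions.containsKey(PROTOTYPE_PARTITION_ID);
            assert prototypePartition != null;
          }
        }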
    
    This change should be entirely internal with no functional
    differences. As such, the only testing changes are some fixes for
    assertions on the Thrift serialized structures and other internals.
    
    Change-Id: I15e91b50eb7c2a5e0bac8c33d603d6cd8cbaca2e
    Reviewed-on: http://gerrit.cloudera.org:8080/10711
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
    Reviewed-by: Todd Lipcon <to...@apache.org>
---
 be/src/exec/hdfs-table-sink.cc                     | 121 ++++++++++-----------
 be/src/exec/hdfs-table-sink.h                      |   5 +-
 be/src/runtime/descriptors.cc                      |   2 +
 be/src/runtime/descriptors.h                       |   5 +
 common/thrift/CatalogObjects.thrift                |  11 +-
 common/thrift/ImpalaInternalService.thrift         |   3 -
 .../apache/impala/analysis/ComputeStatsStmt.java   |   4 +-
 .../org/apache/impala/analysis/LoadDataStmt.java   |  15 +--
 .../org/apache/impala/catalog/FeFsPartition.java   |   6 -
 .../org/apache/impala/catalog/HdfsPartition.java   |  14 +--
 .../java/org/apache/impala/catalog/HdfsTable.java  |  62 ++++++-----
 .../org/apache/impala/planner/HdfsScanNode.java    |   2 +-
 .../apache/impala/planner/SingleNodePlanner.java   |   4 +-
 .../apache/impala/service/CatalogOpExecutor.java   |  29 +----
 .../catalog/CatalogObjectToFromThriftTest.java     |  71 ++++++------
 .../org/apache/impala/catalog/CatalogTest.java     |   9 +-
 .../org/apache/impala/common/FrontendTestBase.java |   2 +-
 .../apache/impala/testutil/BlockIdGenerator.java   |   5 -
 18 files changed, 164 insertions(+), 206 deletions(-)

diff --git a/be/src/exec/hdfs-table-sink.cc b/be/src/exec/hdfs-table-sink.cc
index 4b718ec..3b7dc0f 100644
--- a/be/src/exec/hdfs-table-sink.cc
+++ b/be/src/exec/hdfs-table-sink.cc
@@ -61,7 +61,7 @@ HdfsTableSink::HdfsTableSink(const RowDescriptor* row_desc, const TDataSink& tsi
     RuntimeState* state)
   : DataSink(row_desc, "HdfsTableSink", state),
     table_desc_(nullptr),
-    default_partition_(nullptr),
+    prototype_partition_(nullptr),
     table_id_(tsink.table_sink.target_table_id),
     skip_header_line_count_(
         tsink.table_sink.hdfs_table_sink.__isset.skip_header_line_count ?
@@ -150,74 +150,67 @@ Status HdfsTableSink::Open(RuntimeState* state) {
   DCHECK_EQ(partition_key_exprs_.size(), partition_key_expr_evals_.size());
   RETURN_IF_ERROR(ScalarExprEvaluator::Open(partition_key_expr_evals_, state));
 
-  // Get file format for default partition in table descriptor, and build a map from
-  // partition key values to partition descriptor for multiple output format support. The
-  // map is keyed on the concatenation of the non-constant keys of the PARTITION clause of
-  // the INSERT statement.
+  // Build a map from partition key values to partition descriptor for multiple output
+  // format support. The map is keyed on the concatenation of the non-constant keys of
+  // the PARTITION clause of the INSERT statement.
   for (const HdfsTableDescriptor::PartitionIdToDescriptorMap::value_type& id_to_desc:
        table_desc_->partition_descriptors()) {
-    if (id_to_desc.first == g_ImpalaInternalService_constants.DEFAULT_PARTITION_ID) {
-      default_partition_ = id_to_desc.second;
-    } else {
-      // Build a map whose key is computed from the value of dynamic partition keys for a
-      // particular partition, and whose value is the descriptor for that partition.
-
-      // True if this partition might be written to, false otherwise.
-      // A partition may be written to iff:
-      // For all partition key exprs e, either:
-      //   1. e is not constant
-      //   2. The value supplied by the query for this partition key is equal to e's
-      //   constant value.
-      // Only relevant partitions are remembered in partition_descriptor_map_.
-      bool relevant_partition = true;
-      HdfsPartitionDescriptor* partition = id_to_desc.second;
-      DCHECK_EQ(partition->partition_key_value_evals().size(),
-          partition_key_expr_evals_.size());
-      vector<ScalarExprEvaluator*> dynamic_partition_key_value_evals;
-      for (size_t i = 0; i < partition_key_expr_evals_.size(); ++i) {
-        // Remember non-constant partition key exprs for building hash table of Hdfs files
-        DCHECK(&partition_key_expr_evals_[i]->root() == partition_key_exprs_[i]);
-        if (!partition_key_exprs_[i]->is_constant()) {
-          dynamic_partition_key_value_evals.push_back(
-              partition->partition_key_value_evals()[i]);
-        } else {
-          // Deal with the following: one partition has (year=2009, month=3); another has
-          // (year=2010, month=3).
-          // A query like: INSERT INTO TABLE... PARTITION(year=2009) SELECT month FROM...
-          // would lead to both partitions having the same key modulo ignored constant
-          // partition keys. So only keep a reference to the partition which matches
-          // partition_key_values for constant values, since only that is written to.
-          void* table_partition_key_value =
-              partition->partition_key_value_evals()[i]->GetValue(nullptr);
-          void* target_partition_key_value =
-              partition_key_expr_evals_[i]->GetValue(nullptr);
-          if (table_partition_key_value == nullptr
-              && target_partition_key_value == nullptr) {
-            continue;
-          }
-          if (table_partition_key_value == nullptr
-              || target_partition_key_value == nullptr
-              || !RawValue::Eq(table_partition_key_value, target_partition_key_value,
-                     partition_key_expr_evals_[i]->root().type())) {
-            relevant_partition = false;
-            break;
-          }
+    // Build a map whose key is computed from the value of dynamic partition keys for a
+    // particular partition, and whose value is the descriptor for that partition.
+
+    // True if this partition might be written to, false otherwise.
+    // A partition may be written to iff:
+    // For all partition key exprs e, either:
+    //   1. e is not constant
+    //   2. The value supplied by the query for this partition key is equal to e's
+    //   constant value.
+    // Only relevant partitions are remembered in partition_descriptor_map_.
+    bool relevant_partition = true;
+    HdfsPartitionDescriptor* partition = id_to_desc.second;
+    DCHECK_EQ(partition->partition_key_value_evals().size(),
+        partition_key_expr_evals_.size());
+    vector<ScalarExprEvaluator*> dynamic_partition_key_value_evals;
+    for (size_t i = 0; i < partition_key_expr_evals_.size(); ++i) {
+      // Remember non-constant partition key exprs for building hash table of Hdfs files
+      DCHECK(&partition_key_expr_evals_[i]->root() == partition_key_exprs_[i]);
+      if (!partition_key_exprs_[i]->is_constant()) {
+        dynamic_partition_key_value_evals.push_back(
+            partition->partition_key_value_evals()[i]);
+      } else {
+        // Deal with the following: one partition has (year=2009, month=3); another has
+        // (year=2010, month=3).
+        // A query like: INSERT INTO TABLE... PARTITION(year=2009) SELECT month FROM...
+        // would lead to both partitions having the same key modulo ignored constant
+        // partition keys. So only keep a reference to the partition which matches
+        // partition_key_values for constant values, since only that is written to.
+        void* table_partition_key_value =
+            partition->partition_key_value_evals()[i]->GetValue(nullptr);
+        void* target_partition_key_value =
+            partition_key_expr_evals_[i]->GetValue(nullptr);
+        if (table_partition_key_value == nullptr
+            && target_partition_key_value == nullptr) {
+          continue;
+        }
+        if (table_partition_key_value == nullptr
+            || target_partition_key_value == nullptr
+            || !RawValue::Eq(table_partition_key_value, target_partition_key_value,
+                   partition_key_expr_evals_[i]->root().type())) {
+          relevant_partition = false;
+          break;
         }
-      }
-      if (relevant_partition) {
-        string key;
-        // Pass nullptr as row, since all of these expressions are constant, and can
-        // therefore be evaluated without a valid row context.
-        GetHashTblKey(nullptr, dynamic_partition_key_value_evals, &key);
-        DCHECK(partition_descriptor_map_.find(key) == partition_descriptor_map_.end())
-            << "Partitions with duplicate 'static' keys found during INSERT";
-        partition_descriptor_map_[key] = partition;
       }
     }
+    if (relevant_partition) {
+      string key;
+      // Pass nullptr as row, since all of these expressions are constant, and can
+      // therefore be evaluated without a valid row context.
+      GetHashTblKey(nullptr, dynamic_partition_key_value_evals, &key);
+      DCHECK(partition_descriptor_map_.find(key) == partition_descriptor_map_.end())
+          << "Partitions with duplicate 'static' keys found during INSERT";
+      partition_descriptor_map_[key] = partition;
+    }
   }
-  if (default_partition_ == nullptr) {
-    return Status("No default partition found for HdfsTextTableSink");
-  }
+  prototype_partition_ = CHECK_NOTNULL(table_desc_->prototype_partition_descriptor());
   return Status::OK();
 }
 
@@ -570,7 +563,7 @@ inline Status HdfsTableSink::GetOutputPartition(RuntimeState* state, const Tuple
   existing_partition = partition_keys_to_output_partitions_.find(key);
   if (existing_partition == partition_keys_to_output_partitions_.end()) {
     // Create a new OutputPartition, and add it to partition_keys_to_output_partitions.
-    const HdfsPartitionDescriptor* partition_descriptor = default_partition_;
+    const HdfsPartitionDescriptor* partition_descriptor = prototype_partition_;
     PartitionDescriptorMap::const_iterator it = partition_descriptor_map_.find(key);
     if (it != partition_descriptor_map_.end()) {
       partition_descriptor = it->second;
diff --git a/be/src/exec/hdfs-table-sink.h b/be/src/exec/hdfs-table-sink.h
index ef8b7a6..002e866 100644
--- a/be/src/exec/hdfs-table-sink.h
+++ b/be/src/exec/hdfs-table-sink.h
@@ -242,8 +242,9 @@ class HdfsTableSink : public DataSink {
   /// Descriptor of target table. Set in Prepare().
   const HdfsTableDescriptor* table_desc_;
 
-  /// Currently this is the default partition since we don't support multi-format sinks.
-  const HdfsPartitionDescriptor* default_partition_;
+  /// The partition descriptor used when creating new partitions from this sink.
+  /// Currently we don't support multi-format sinks.
+  const HdfsPartitionDescriptor* prototype_partition_;
 
   /// Table id resolved in Prepare() to set tuple_desc_;
   TableId table_id_;
diff --git a/be/src/runtime/descriptors.cc b/be/src/runtime/descriptors.cc
index bc983de..12de054 100644
--- a/be/src/runtime/descriptors.cc
+++ b/be/src/runtime/descriptors.cc
@@ -226,6 +226,8 @@ HdfsTableDescriptor::HdfsTableDescriptor(const TTableDescriptor& tdesc, ObjectPo
         pool->Add(new HdfsPartitionDescriptor(tdesc.hdfsTable, entry.second));
     partition_descriptors_[entry.first] = partition;
   }
+  prototype_partition_descriptor_ = pool->Add(new HdfsPartitionDescriptor(
+    tdesc.hdfsTable, tdesc.hdfsTable.prototype_partition));
   avro_schema_ = tdesc.hdfsTable.__isset.avroSchema ? tdesc.hdfsTable.avroSchema : "";
 }
 
diff --git a/be/src/runtime/descriptors.h b/be/src/runtime/descriptors.h
index acbce34..982ede3 100644
--- a/be/src/runtime/descriptors.h
+++ b/be/src/runtime/descriptors.h
@@ -328,6 +328,10 @@ class HdfsTableDescriptor : public TableDescriptor {
     return partition_descriptors_;
   }
 
+  const HdfsPartitionDescriptor* prototype_partition_descriptor() const {
+    return prototype_partition_descriptor_;
+  }
+
   virtual std::string DebugString() const;
 
  protected:
@@ -336,6 +340,7 @@ class HdfsTableDescriptor : public TableDescriptor {
   /// Special string to indicate NULL values in text-encoded columns.
   std::string null_column_value_;
   PartitionIdToDescriptorMap partition_descriptors_;
+  HdfsPartitionDescriptor* prototype_partition_descriptor_;
   /// Set to the table's Avro schema if this is an Avro table, empty string otherwise
   std::string avro_schema_;
 };
diff --git a/common/thrift/CatalogObjects.thrift b/common/thrift/CatalogObjects.thrift
index 0f71f5f..23da91f 100644
--- a/common/thrift/CatalogObjects.thrift
+++ b/common/thrift/CatalogObjects.thrift
@@ -281,6 +281,11 @@ struct THdfsPartition {
   18: optional bool has_incremental_stats
 }
 
+// Constant partition ID used for THdfsPartition.prototype_partition above.
+// Must be < 0 to avoid collisions
+const i64 PROTOTYPE_PARTITION_ID = -1;
+
+
 struct THdfsTable {
   1: required string hdfsBaseDir
 
@@ -296,9 +301,13 @@ struct THdfsTable {
   // Set to the table's Avro schema if this is an Avro table
   6: optional string avroSchema
 
-  // map from partition id to partition metadata
+  // Map from partition id to partition metadata.
+  // Does not include the special prototype partition -1 (see below).
   4: required map<i64, THdfsPartition> partitions
 
+  // Prototype partition, used when creating new partitions during insert.
+  10: required THdfsPartition prototype_partition
+
   // Each TNetworkAddress is a datanode which contains blocks of a file in the table.
   // Used so that each THdfsFileBlock can just reference an index in this list rather
   // than duplicate the list of network address, which helps reduce memory usage.
diff --git a/common/thrift/ImpalaInternalService.thrift b/common/thrift/ImpalaInternalService.thrift
index e0f809c..ef8e5a1 100644
--- a/common/thrift/ImpalaInternalService.thrift
+++ b/common/thrift/ImpalaInternalService.thrift
@@ -42,9 +42,6 @@ const i32 NUM_NODES_ALL_RACKS = -1
 // constants for TPlanNodeId
 const i32 INVALID_PLAN_NODE_ID = -1
 
-// Constant default partition ID, must be < 0 to avoid collisions
-const i64 DEFAULT_PARTITION_ID = -1;
-
 enum TParquetFallbackSchemaResolution {
   POSITION,
   NAME
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index dec3f45..9259e49 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -440,7 +440,6 @@ public class ComputeStatsStmt extends StatementBase {
         Collection<? extends FeFsPartition> allPartitions =
             hdfsTable.getPartitions();
         for (FeFsPartition p: allPartitions) {
-          if (p.isDefaultPartition()) continue;
           TPartitionStats partStats = p.getPartitionStats();
           if (!p.hasIncrementalStats() || tableIsMissingColStats) {
             if (partStats == null) {
@@ -458,7 +457,7 @@ public class ComputeStatsStmt extends StatementBase {
             validPartStats_.add(partStats);
           }
         }
-        if (expectedPartitions_.size() == hdfsTable.getPartitions().size() - 1) {
+        if (expectedPartitions_.size() == hdfsTable.getPartitions().size()) {
           expectedPartitions_.clear();
           expectAllPartitions_ = true;
         }
@@ -479,7 +478,6 @@ public class ComputeStatsStmt extends StatementBase {
             Sets.newHashSet(partitionSet_.getPartitions());
         Collection<? extends FeFsPartition> allPartitions = hdfsTable.getPartitions();
         for (FeFsPartition p : allPartitions) {
-          if (p.isDefaultPartition()) continue;
           if (targetPartitions.contains(p)) continue;
           TPartitionStats partStats = p.getPartitionStats();
           if (partStats != null) validPartStats_.add(partStats);
diff --git a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
index 3116eb4..e20a88c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
@@ -32,6 +32,7 @@ import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
@@ -205,24 +206,20 @@ public class LoadDataStmt extends StatementBase {
           "target table (%s) because Impala does not have WRITE access to HDFS " +
           "location: ", hdfsTable.getFullName());
 
-      FeFsPartition partition;
-      String location;
       if (partitionSpec_ != null) {
-        partition = hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
-        location = partition.getLocation();
+        HdfsPartition partition = hdfsTable.getPartition(
+            partitionSpec_.getPartitionSpecKeyValues());
+        String location = partition.getLocation();
         if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
           throw new AnalysisException(noWriteAccessErrorMsg + location);
         }
       } else {
-        // "default" partition
-        partition = hdfsTable.getPartitionMap().get(
-            ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID);
-        location = hdfsTable.getLocation();
+        // No specific partition specified, so we need to check write access
+        // on the table as a whole.
         if (!hdfsTable.hasWriteAccess()) {
           throw new AnalysisException(noWriteAccessErrorMsg + hdfsTable.getLocation());
         }
       }
-      Preconditions.checkNotNull(partition);
     } catch (FileNotFoundException e) {
       throw new AnalysisException("File not found: " + e.getMessage(), e);
     } catch (IOException e) {
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java b/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
index 9d0c40d..52edeb8 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
@@ -43,12 +43,6 @@ public interface FeFsPartition {
   long getId();
 
   /**
-   * @return true if this partition represents the "default partition" of an
-   * unpartitioned tabe
-   */
-  boolean isDefaultPartition();
-
-  /**
    * @return the table that contains this partition
    */
   FeFsTable getTable();
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index 3be49e8..f99a8d6 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -42,7 +42,7 @@ import org.apache.impala.common.Reference;
 import org.apache.impala.fb.FbCompression;
 import org.apache.impala.fb.FbFileBlock;
 import org.apache.impala.fb.FbFileDesc;
-import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.CatalogObjectsConstants;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.TExpr;
 import org.apache.impala.thrift.TExprNode;
@@ -67,7 +67,6 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
-import com.google.common.primitives.Ints;
 import com.google.flatbuffers.FlatBufferBuilder;
 
 /**
@@ -444,11 +443,6 @@ public class HdfsPartition implements FeFsPartition {
   }
 
   @Override // FeFsPartition
-  public boolean isDefaultPartition() {
-    return id_ == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID;
-  }
-
-  @Override // FeFsPartition
   public boolean isCacheable() {
     return FileSystemUtil.isPathCacheable(new Path(getLocation()));
   }
@@ -755,13 +749,13 @@ public class HdfsPartition implements FeFsPartition {
         accessLevel);
   }
 
-  public static HdfsPartition defaultPartition(
+  public static HdfsPartition prototypePartition(
       HdfsTable table, HdfsStorageDescriptor storageDescriptor) {
     List<LiteralExpr> emptyExprList = Lists.newArrayList();
     List<FileDescriptor> emptyFileDescriptorList = Lists.newArrayList();
     return new HdfsPartition(table, null, emptyExprList,
         storageDescriptor, emptyFileDescriptorList,
-        ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID, null,
+        CatalogObjectsConstants.PROTOTYPE_PARTITION_ID, null,
         TAccessLevel.READ_WRITE);
   }
 
@@ -810,7 +804,7 @@ public class HdfsPartition implements FeFsPartition {
         thriftPartition.blockSize);
 
     List<LiteralExpr> literalExpr = Lists.newArrayList();
-    if (id != ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
+    if (id != CatalogObjectsConstants.PROTOTYPE_PARTITION_ID) {
       List<Column> clusterCols = Lists.newArrayList();
       for (int i = 0; i < table.getNumClusteringCols(); ++i) {
         clusterCols.add(table.getColumns().get(i));
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index ef4f2eb..30979cd 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -65,7 +65,7 @@ import org.apache.impala.common.PrintUtils;
 import org.apache.impala.common.Reference;
 import org.apache.impala.fb.FbFileBlock;
 import org.apache.impala.service.BackendConfig;
-import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.CatalogObjectsConstants;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.TCatalogObjectType;
 import org.apache.impala.thrift.TColumn;
@@ -93,9 +93,11 @@ import org.slf4j.LoggerFactory;
 
 import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Timer;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -187,6 +189,12 @@ public class HdfsTable extends Table implements FeFsTable {
   // Store all the partition ids of an HdfsTable.
   private final HashSet<Long> partitionIds_ = Sets.newHashSet();
 
+  // The partition used as a prototype when creating new partitions during
+  // insertion. New partitions inherit file format and other settings from
+  // the prototype.
+  @VisibleForTesting
+  HdfsPartition prototypePartition_;
+
   // Estimate (in bytes) of the incremental stats size per column per partition
   public static final long STATS_SIZE_PER_COLUMN_BYTES = 400;
 
@@ -359,9 +367,6 @@ public class HdfsTable extends Table implements FeFsTable {
     if (!isLocationCacheable()) return false;
     if (!isMarkedCached() && numClusteringCols_ > 0) {
       for (FeFsPartition partition: getPartitions()) {
-        if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-          continue;
-        }
         if (!partition.isCacheable()) {
           return false;
         }
@@ -663,7 +668,6 @@ public class HdfsTable extends Table implements FeFsTable {
     // Search through all the partitions and check if their partition key values
     // match the values being searched for.
     for (HdfsPartition partition: partitionMap_.values()) {
-      if (partition.isDefaultPartition()) continue;
       List<LiteralExpr> partitionValues = partition.getPartitionValues();
       Preconditions.checkState(partitionValues.size() == targetValues.size());
       boolean matchFound = true;
@@ -747,7 +751,7 @@ public class HdfsTable extends Table implements FeFsTable {
   }
 
   /**
-   * Resets any partition metadata, creates the default partition and sets the base
+   * Resets any partition metadata, creates the prototype partition and sets the base
    * table directory path as well as the caching info from the HMS table.
    */
   private void initializePartitionMetadata(
@@ -755,10 +759,7 @@ public class HdfsTable extends Table implements FeFsTable {
     Preconditions.checkNotNull(msTbl);
     resetPartitions();
     hdfsBaseDir_ = msTbl.getSd().getLocation();
-    // INSERT statements need to refer to this if they try to write to new partitions
-    // Scans don't refer to this because by definition all partitions they refer to
-    // exist.
-    addDefaultPartition(msTbl.getSd());
+    setPrototypePartition(msTbl.getSd());
 
     // We silently ignore cache directives that no longer exist in HDFS, and remove
     // non-existing cache directives from the parameters.
@@ -1187,16 +1188,15 @@ public class HdfsTable extends Table implements FeFsTable {
   }
 
   /**
-   * Adds or replaces the default partition.
+   * Update the prototype partition used when creating new partitions for
+   * this table. New partitions will inherit storage properties from the
+   * provided descriptor.
    */
-  public void addDefaultPartition(StorageDescriptor storageDescriptor)
+  public void setPrototypePartition(StorageDescriptor storageDescriptor)
       throws CatalogException {
-    // Default partition has no files and is not referred to by scan nodes. Data sinks
-    // refer to this to understand how to create new partitions.
     HdfsStorageDescriptor hdfsStorageDescriptor =
         HdfsStorageDescriptor.fromStorageDescriptor(this.name_, storageDescriptor);
-    HdfsPartition partition = HdfsPartition.defaultPartition(this, hdfsStorageDescriptor);
-    partitionMap_.put(partition.getId(), partition);
+    prototypePartition_ = HdfsPartition.prototypePartition(this, hdfsStorageDescriptor);
   }
 
   @Override
@@ -1304,14 +1304,15 @@ public class HdfsTable extends Table implements FeFsTable {
   /**
    * Updates the file metadata of an unpartitioned HdfsTable.
    */
-  private void updateUnpartitionedTableFileMd() throws Exception {
+  private void updateUnpartitionedTableFileMd() throws CatalogException {
+    Preconditions.checkState(getNumClusteringCols() == 0);
     if (LOG.isTraceEnabled()) {
       LOG.trace("update unpartitioned table: " + getFullName());
     }
     resetPartitions();
     org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
     Preconditions.checkNotNull(msTbl);
-    addDefaultPartition(msTbl.getSd());
+    setPrototypePartition(msTbl.getSd());
     HdfsPartition part = createPartition(msTbl.getSd(), null);
     addPartition(part);
     if (isMarkedCached_) part.markCached();
@@ -1354,8 +1355,6 @@ public class HdfsTable extends Table implements FeFsTable {
     // Identify dirty partitions that need to be loaded from the Hive Metastore and
     // partitions that no longer exist in the Hive Metastore.
     for (HdfsPartition partition: partitionMap_.values()) {
-      // Ignore the default partition
-      if (partition.isDefaultPartition()) continue;
       // Remove partitions that don't exist in the Hive Metastore. These are partitions
       // that were removed from HMS using some external process, e.g. Hive.
       if (!msPartitionNames.contains(partition.getPartitionName())) {
@@ -1439,12 +1438,11 @@ public class HdfsTable extends Table implements FeFsTable {
   @Override
   public void setTableStats(org.apache.hadoop.hive.metastore.api.Table msTbl) {
     super.setTableStats(msTbl);
-    // For unpartitioned tables set the numRows in its partitions
+    // For unpartitioned tables set the numRows in its single partition
     // to the table's numRows.
     if (numClusteringCols_ == 0 && !partitionMap_.isEmpty()) {
-      // Unpartitioned tables have a 'dummy' partition and a default partition.
-      // Temp tables used in CTAS statements have one partition.
-      Preconditions.checkState(partitionMap_.size() == 2 || partitionMap_.size() == 1);
+      // Unpartitioned tables have a default partition.
+      Preconditions.checkState(partitionMap_.size() == 1);
       for (HdfsPartition p: partitionMap_.values()) {
         p.setNumRows(getNumRows());
       }
@@ -1663,6 +1661,9 @@ public class HdfsTable extends Table implements FeFsTable {
             HdfsPartition.fromThrift(this, part.getKey(), part.getValue());
         addPartition(hdfsPart);
       }
+      prototypePartition_ = HdfsPartition.fromThrift(this,
+          CatalogObjectsConstants.PROTOTYPE_PARTITION_ID,
+          hdfsTable.prototype_partition);
     } catch (CatalogException e) {
       throw new TableLoadingException(e.getMessage());
     }
@@ -1739,11 +1740,13 @@ public class HdfsTable extends Table implements FeFsTable {
     }
     if (includeFileDesc) fileMetadataStats_.set(stats);
 
+    THdfsPartition prototypePartition = prototypePartition_.toThrift(false, false);
+
     memUsageEstimate += fileMetadataStats_.numFiles * PER_FD_MEM_USAGE_BYTES +
         fileMetadataStats_.numBlocks * PER_BLOCK_MEM_USAGE_BYTES;
     setEstimatedMetadataSize(memUsageEstimate);
     THdfsTable hdfsTable = new THdfsTable(hdfsBaseDir_, getColumnNames(),
-        nullPartitionKeyValue_, nullColumnValue_, idToPartition);
+        nullPartitionKeyValue_, nullColumnValue_, idToPartition, prototypePartition);
     hdfsTable.setAvroSchema(avroSchema_);
     hdfsTable.setMultiple_filesystems(multipleFileSystems_);
     if (includeFileDesc) {
@@ -1771,8 +1774,12 @@ public class HdfsTable extends Table implements FeFsTable {
    * Returns the file format that the majority of partitions are stored in.
    */
   public HdfsFileFormat getMajorityFormat() {
+    // In the case that we have no partitions added to the table yet, it's
+    // important to add the "prototype" partition as a fallback.
+    Iterable<HdfsPartition> partitionsToConsider = Iterables.concat(
+        partitionMap_.values(), Collections.singleton(prototypePartition_));
     Map<HdfsFileFormat, Integer> numPartitionsByFormat = Maps.newHashMap();
-    for (HdfsPartition partition: partitionMap_.values()) {
+    for (HdfsPartition partition: partitionsToConsider) {
       HdfsFileFormat format = partition.getInputFormatDescriptor().getFileFormat();
       Integer numPartitions = numPartitionsByFormat.get(format);
       if (numPartitions == null) {
@@ -1804,7 +1811,6 @@ public class HdfsTable extends Table implements FeFsTable {
     HashSet<List<LiteralExpr>> existingPartitions = new HashSet<List<LiteralExpr>>();
     // Get the list of partition values of existing partitions in Hive Metastore.
     for (HdfsPartition partition: partitionMap_.values()) {
-      if (partition.isDefaultPartition()) continue;
       existingPartitions.add(partition.getPartitionValues());
     }
 
@@ -2001,8 +2007,6 @@ public class HdfsTable extends Table implements FeFsTable {
 
     long totalCachedBytes = 0L;
     for (HdfsPartition p: orderedPartitions) {
-      // Ignore dummy default partition.
-      if (p.isDefaultPartition()) continue;
       TResultRowBuilder rowBuilder = new TResultRowBuilder();
 
       // Add the partition-key values (as strings for simplicity).
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
index 16861af..de382b1 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
@@ -1204,7 +1204,7 @@ public class HdfsScanNode extends ScanNode {
     if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
       if (tbl_.getNumClusteringCols() == 0) numPartitions_ = 1;
       output.append(String.format("%spartitions=%s/%s files=%s size=%s", detailPrefix,
-          numPartitions_, table.getPartitions().size() - 1, totalFiles_,
+          numPartitions_, table.getPartitions().size(), totalFiles_,
           PrintUtils.printBytes(totalBytes_)));
       output.append("\n");
       if (!conjuncts_.isEmpty()) {
diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
index 331c910..42236a3 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
@@ -1231,9 +1231,7 @@ public class SingleNodePlanner {
 
       for (FeFsPartition partition: partitions) {
         // Ignore empty partitions to match the behavior of the scan based approach.
-        if (partition.isDefaultPartition() || partition.getSize() == 0) {
-          continue;
-        }
+        if (partition.getSize() == 0) continue;
         List<Expr> exprs = Lists.newArrayList();
         for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
           // UnionNode.init() will go through all the slots in the tuple descriptor so
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index f846e60..f0b0adb 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -90,7 +90,6 @@ import org.apache.impala.common.InternalException;
 import org.apache.impala.common.Pair;
 import org.apache.impala.common.Reference;
 import org.apache.impala.compat.MetastoreShim;
-import org.apache.impala.thrift.ImpalaInternalServiceConstants;
 import org.apache.impala.thrift.JniCatalogConstants;
 import org.apache.impala.thrift.TAlterDbParams;
 import org.apache.impala.thrift.TAlterDbSetOwnerParams;
@@ -822,7 +821,6 @@ public class CatalogOpExecutor {
     for (FeFsPartition fePartition: table.getPartitions()) {
       // TODO(todd): avoid downcast to implementation class
       HdfsPartition partition = (HdfsPartition)fePartition;
-      if (partition.isDefaultPartition()) continue;
 
       // NULL keys are returned as 'NULL' in the partition_stats map, so don't substitute
       // this partition's keys with Hive's replacement value.
@@ -1271,11 +1269,6 @@ public class CatalogOpExecutor {
       // TODO(todd): avoid downcast
       HdfsPartition part = (HdfsPartition) fePart;
       boolean isModified = false;
-      // The default partition is an Impala-internal abstraction and is not
-      // represented in the Hive Metastore.
-      if (part.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-        continue;
-      }
       if (part.getPartitionStats() != null) {
         PartitionStatsUtil.deletePartStats(part);
         isModified = true;
@@ -1538,7 +1531,6 @@ public class CatalogOpExecutor {
       try {
         HdfsTable hdfsTable = (HdfsTable)table;
         for (FeFsPartition part: hdfsTable.getPartitions()) {
-          if (part.isDefaultPartition()) continue;
           FileSystemUtil.deleteAllVisibleFiles(new Path(part.getLocation()));
         }
 
@@ -2355,9 +2347,9 @@ public class CatalogOpExecutor {
       org.apache.hadoop.hive.metastore.api.Table msTbl =
           tbl.getMetaStoreTable().deepCopy();
       setStorageDescriptorFileFormat(msTbl.getSd(), fileFormat);
-      // The default partition must be updated if the file format is changed so that new
+      // The prototype partition must be updated if the file format is changed so that new
       // partitions are created with the new file format.
-      if (tbl instanceof HdfsTable) ((HdfsTable) tbl).addDefaultPartition(msTbl.getSd());
+      if (tbl instanceof HdfsTable) ((HdfsTable) tbl).setPrototypePartition(msTbl.getSd());
       applyAlterTable(msTbl, true);
       reloadFileMetadata = true;
     } else {
@@ -2394,9 +2386,9 @@ public class CatalogOpExecutor {
           tbl.getMetaStoreTable().deepCopy();
       StorageDescriptor sd = msTbl.getSd();
       HiveStorageDescriptorFactory.setSerdeInfo(rowFormat, sd.getSerdeInfo());
-      // The default partition must be updated if the row format is changed so that new
+      // The prototype partition must be updated if the row format is changed so that new
       // partitions are created with the new file format.
-      ((HdfsTable) tbl).addDefaultPartition(msTbl.getSd());
+      ((HdfsTable) tbl).setPrototypePartition(msTbl.getSd());
       applyAlterTable(msTbl, true);
       reloadFileMetadata = true;
     } else {
@@ -2580,11 +2572,6 @@ public class CatalogOpExecutor {
         for (FeFsPartition fePartition: hdfsTable.getPartitions()) {
           // TODO(todd): avoid downcast
           HdfsPartition partition = (HdfsPartition) fePartition;
-          // No need to cache the default partition because it contains no files and is
-          // not referred to by scan nodes.
-          if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-            continue;
-          }
           // Only issue cache directives if the data is uncached or the cache directive
           // needs to be updated
           if (!partition.isMarkedCached() ||
@@ -2637,9 +2624,6 @@ public class CatalogOpExecutor {
         for (FeFsPartition fePartition: hdfsTable.getPartitions()) {
           // TODO(todd): avoid downcast
           HdfsPartition partition = (HdfsPartition) fePartition;
-          if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-            continue;
-          }
           if (partition.isMarkedCached()) {
             HdfsCachingUtil.removePartitionCacheDirective(partition);
             try {
@@ -3304,11 +3288,6 @@ public class CatalogOpExecutor {
             Sets.newHashSet(update.getCreated_partitions());
         partsToLoadMetadata = Sets.newHashSet(partsToCreate);
         for (FeFsPartition partition: ((HdfsTable) table).getPartitions()) {
-          // Skip dummy default partition.
-          long partitionId = partition.getId();
-          if (partitionId == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-            continue;
-          }
           // TODO: In the BE we build partition names without a trailing char. In FE
           // we build partition name with a trailing char. We should make this
           // consistent.
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
index 3750149..bbb1993 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
@@ -25,7 +25,7 @@ import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.testutil.CatalogServiceTestCatalog;
-import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.CatalogObjectsConstants;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.THBaseTable;
 import org.apache.impala.thrift.THdfsPartition;
@@ -37,6 +37,7 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
 /**
@@ -59,6 +60,9 @@ public class CatalogObjectToFromThriftTest {
                         "functional_seq"};
     for (String dbName: dbNames) {
       Table table = catalog_.getOrLoadTable(dbName, "alltypes");
+      Assert.assertEquals(24, ((HdfsTable)table).getPartitions().size());
+      Assert.assertEquals(24, ((HdfsTable)table).getPartitionIds().size());
+
       TTable thriftTable = getThriftTable(table);
       Assert.assertEquals(thriftTable.tbl_name, "alltypes");
       Assert.assertEquals(thriftTable.db_name, dbName);
@@ -68,17 +72,17 @@ public class CatalogObjectToFromThriftTest {
       THdfsTable hdfsTable = thriftTable.getHdfs_table();
       Assert.assertTrue(hdfsTable.hdfsBaseDir != null);
 
-      // The table has 24 partitions + the default partition
-      Assert.assertEquals(hdfsTable.getPartitions().size(), 25);
-      Assert.assertTrue(hdfsTable.getPartitions().containsKey(
-          new Long(ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID)));
-
+      // The table has 24 partitions.
+      Assert.assertEquals(24, hdfsTable.getPartitions().size());
+      Assert.assertFalse(hdfsTable.getPartitions().containsKey(
+          CatalogObjectsConstants.PROTOTYPE_PARTITION_ID));
+      // The prototype partition should be included and set properly.
+      Assert.assertTrue(hdfsTable.isSetPrototype_partition());
+      Assert.assertEquals(CatalogObjectsConstants.PROTOTYPE_PARTITION_ID,
+          hdfsTable.getPrototype_partition().id);
+      Assert.assertNull(hdfsTable.getPrototype_partition().location);
       for (Map.Entry<Long, THdfsPartition> kv: hdfsTable.getPartitions().entrySet()) {
-        if (kv.getKey() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-          Assert.assertEquals(kv.getValue().getPartitionKeyExprs().size(), 0);
-        } else {
-          Assert.assertEquals(kv.getValue().getPartitionKeyExprs().size(), 2);
-        }
+        Assert.assertEquals(kv.getValue().getPartitionKeyExprs().size(), 2);
       }
 
       // Now try to load the thrift struct.
@@ -90,29 +94,26 @@ public class CatalogObjectToFromThriftTest {
       if (dbName.equals("functional")) Assert.assertEquals(7300, newTable.getNumRows());
 
       HdfsTable newHdfsTable = (HdfsTable) newTable;
-      Assert.assertEquals(newHdfsTable.getPartitions().size(), 25);
-      boolean foundDefaultPartition = false;
+      Assert.assertEquals(newHdfsTable.getPartitions().size(), 24);
+      Assert.assertEquals(newHdfsTable.getPartitionIds().size(), 24);
       for (FeFsPartition hdfsPart: newHdfsTable.getPartitions()) {
-        if (hdfsPart.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-          Assert.assertEquals(foundDefaultPartition, false);
-          foundDefaultPartition = true;
+        Assert.assertEquals(hdfsPart.getFileDescriptors().size(), 1);
+        Assert.assertTrue(
+            hdfsPart.getFileDescriptors().get(0).getNumFileBlocks() > 0);
+
+        // Verify the partition access level is getting set properly. The alltypes_seq
+        // table has two partitions that are read_only.
+        if (dbName.equals("functional_seq") && (
+            hdfsPart.getPartitionName().equals("year=2009/month=1") ||
+            hdfsPart.getPartitionName().equals("year=2009/month=3"))) {
+          Assert.assertEquals(TAccessLevel.READ_ONLY, hdfsPart.getAccessLevel());
         } else {
-          Assert.assertEquals(hdfsPart.getFileDescriptors().size(), 1);
-          Assert.assertTrue(
-              hdfsPart.getFileDescriptors().get(0).getNumFileBlocks() > 0);
-
-          // Verify the partition access level is getting set properly. The alltypes_seq
-          // table has two partitions that are read_only.
-          if (dbName.equals("functional_seq") && (
-              hdfsPart.getPartitionName().equals("year=2009/month=1") ||
-              hdfsPart.getPartitionName().equals("year=2009/month=3"))) {
-            Assert.assertEquals(TAccessLevel.READ_ONLY, hdfsPart.getAccessLevel());
-          } else {
-            Assert.assertEquals(TAccessLevel.READ_WRITE, hdfsPart.getAccessLevel());
-          }
+          Assert.assertEquals(TAccessLevel.READ_WRITE, hdfsPart.getAccessLevel());
         }
       }
-      Assert.assertEquals(foundDefaultPartition, true);
+      Assert.assertNotNull(newHdfsTable.prototypePartition_);
+      Assert.assertEquals(((HdfsTable)table).prototypePartition_.getParameters(),
+          newHdfsTable.prototypePartition_.getParameters());
     }
   }
 
@@ -214,13 +215,9 @@ public class CatalogObjectToFromThriftTest {
     HdfsTable hdfsTable = (HdfsTable) table;
     // Get any partition with valid HMS parameters to create a
     // dummy partition.
-    HdfsPartition part = null;
-    for (FeFsPartition partition: hdfsTable.getPartitions()) {
-      if (!partition.isDefaultPartition()) {
-        part = (HdfsPartition) partition;
-        break;
-      }
-    }
+    HdfsPartition part = (HdfsPartition)Iterables.getFirst(
+        hdfsTable.getPartitions(), null);
+    Assert.assertNotNull(part);
     // Create a dummy partition with an invalid decimal type.
     try {
       HdfsPartition dummyPart = new HdfsPartition(hdfsTable, part.toHmsPartition(),
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
index f8a2682..4126b72 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
@@ -17,7 +17,6 @@
 
 package org.apache.impala.catalog;
 
-import static org.apache.impala.thrift.ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -291,14 +290,10 @@ public class CatalogTest {
     Collection<? extends FeFsPartition> partitions = table.getPartitions();
 
     // check that partition keys cover the date range 1/1/2009-12/31/2010
-    // and that we have one file per partition, plus the default partition
-    assertEquals(25, partitions.size());
+    // and that we have one file per partition.
+    assertEquals(24, partitions.size());
     Set<Long> months = Sets.newHashSet();
     for (FeFsPartition p: partitions) {
-      if (p.getId() == DEFAULT_PARTITION_ID) {
-        continue;
-      }
-
       assertEquals(2, p.getPartitionValues().size());
 
       LiteralExpr key1Expr = p.getPartitionValues().get(0);
diff --git a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
index 6f3533a..2472961 100644
--- a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
+++ b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
@@ -181,7 +181,7 @@ public class FrontendTestBase {
       }
       try {
         HdfsTable hdfsTable = (HdfsTable) dummyTable;
-        hdfsTable.addDefaultPartition(msTbl.getSd());
+        hdfsTable.setPrototypePartition(msTbl.getSd());
       } catch (CatalogException e) {
         e.printStackTrace();
         fail("Failed to add test table:\n" + createTableSql);
diff --git a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
index 820b33a..07ed153 100644
--- a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
+++ b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
@@ -73,11 +73,6 @@ public class BlockIdGenerator {
           // Write the output as <tablename>: <blockid1> <blockid2> <etc>
           writer.write(tableName + ":");
           for (FeFsPartition partition: hdfsTable.getPartitions()) {
-            // Ignore the default partition.
-            if (partition.getId() ==
-                    ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-              continue;
-            }
             List<FileDescriptor> fileDescriptors = partition.getFileDescriptors();
             for (FileDescriptor fd : fileDescriptors) {
               Path p = new Path(partition.getLocation(), fd.getFileName());